Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 37
-rw-r--r--  arch/arm/boot/compressed/Makefile | 10
-rw-r--r--  arch/arm/boot/compressed/head-shmobile.S | 12
-rw-r--r--  arch/arm/boot/compressed/head.S | 3
-rw-r--r--  arch/arm/boot/compressed/mmcif-sh7372.c | 2
-rw-r--r--  arch/arm/boot/compressed/sdhi-sh7372.c | 95
-rw-r--r--  arch/arm/boot/compressed/sdhi-shmobile.c | 449
-rw-r--r--  arch/arm/boot/compressed/sdhi-shmobile.h | 11
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.in | 12
-rw-r--r--  arch/arm/common/dmabounce.c | 193
-rw-r--r--  arch/arm/common/gic.c | 7
-rw-r--r--  arch/arm/common/it8152.c | 16
-rw-r--r--  arch/arm/common/sa1111.c | 60
-rw-r--r--  arch/arm/include/asm/bitops.h | 4
-rw-r--r--  arch/arm/include/asm/dma-mapping.h | 88
-rw-r--r--  arch/arm/include/asm/entry-macro-multi.S | 14
-rw-r--r--  arch/arm/include/asm/memory.h | 12
-rw-r--r--  arch/arm/include/asm/pmu.h | 2
-rw-r--r--  arch/arm/include/asm/proc-fns.h | 14
-rw-r--r--  arch/arm/include/asm/scatterlist.h | 4
-rw-r--r--  arch/arm/include/asm/setup.h | 8
-rw-r--r--  arch/arm/include/asm/suspend.h | 22
-rw-r--r--  arch/arm/include/asm/tcm.h | 2
-rw-r--r--  arch/arm/include/asm/tlbflush.h | 58
-rw-r--r--  arch/arm/include/asm/traps.h | 3
-rw-r--r--  arch/arm/kernel/entry-armv.S | 277
-rw-r--r--  arch/arm/kernel/entry-header.S | 19
-rw-r--r--  arch/arm/kernel/head-nommu.S | 8
-rw-r--r--  arch/arm/kernel/head.S | 8
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 12
-rw-r--r--  arch/arm/kernel/irq.c | 51
-rw-r--r--  arch/arm/kernel/perf_event.c | 10
-rw-r--r--  arch/arm/kernel/pmu.c | 87
-rw-r--r--  arch/arm/kernel/setup.c | 101
-rw-r--r--  arch/arm/kernel/sleep.S | 84
-rw-r--r--  arch/arm/kernel/smp.c | 11
-rw-r--r--  arch/arm/kernel/smp_scu.c | 2
-rw-r--r--  arch/arm/kernel/smp_twd.c | 2
-rw-r--r--  arch/arm/kernel/tcm.c | 68
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 126
-rw-r--r--  arch/arm/mach-bcmring/include/mach/entry-macro.S | 4
-rw-r--r--  arch/arm/mach-davinci/board-dm365-evm.c | 4
-rw-r--r--  arch/arm/mach-davinci/gpio.c | 21
-rw-r--r--  arch/arm/mach-davinci/include/mach/entry-macro.S | 3
-rw-r--r--  arch/arm/mach-davinci/irq.c | 8
-rw-r--r--  arch/arm/mach-ep93xx/core.c | 4
-rw-r--r--  arch/arm/mach-exynos4/cpu.c | 6
-rw-r--r--  arch/arm/mach-exynos4/dev-audio.c | 2
-rw-r--r--  arch/arm/mach-exynos4/headsmp.S | 2
-rw-r--r--  arch/arm/mach-exynos4/mach-smdkv310.c | 8
-rw-r--r--  arch/arm/mach-exynos4/platsmp.c | 8
-rw-r--r--  arch/arm/mach-exynos4/pm.c | 2
-rw-r--r--  arch/arm/mach-exynos4/sleep.S | 22
-rw-r--r--  arch/arm/mach-h720x/include/mach/entry-macro.S | 3
-rw-r--r--  arch/arm/mach-ixp4xx/common-pci.c | 12
-rw-r--r--  arch/arm/mach-ixp4xx/common.c | 10
-rw-r--r--  arch/arm/mach-lpc32xx/include/mach/entry-macro.S | 4
-rw-r--r--  arch/arm/mach-mmp/pxa168.c | 2
-rw-r--r--  arch/arm/mach-mmp/pxa910.c | 2
-rw-r--r--  arch/arm/mach-msm/platsmp.c | 8
-rw-r--r--  arch/arm/mach-omap1/board-ams-delta.c | 8
-rw-r--r--  arch/arm/mach-omap1/gpio15xx.c | 4
-rw-r--r--  arch/arm/mach-omap1/gpio16xx.c | 10
-rw-r--r--  arch/arm/mach-omap1/gpio7xx.c | 14
-rw-r--r--  arch/arm/mach-omap2/board-rx51-peripherals.c | 2
-rw-r--r--  arch/arm/mach-omap2/control.c | 7
-rw-r--r--  arch/arm/mach-omap2/control.h | 6
-rw-r--r--  arch/arm/mach-omap2/include/mach/entry-macro.S | 3
-rw-r--r--  arch/arm/mach-omap2/omap-smp.c | 8
-rw-r--r--  arch/arm/mach-omap2/pm.h | 22
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 80
-rw-r--r--  arch/arm/mach-omap2/sleep34xx.S | 518
-rw-r--r--  arch/arm/mach-pnx4008/include/mach/entry-macro.S | 5
-rw-r--r--  arch/arm/mach-pxa/include/mach/pm.h | 4
-rw-r--r--  arch/arm/mach-pxa/mfp-pxa2xx.c | 4
-rw-r--r--  arch/arm/mach-pxa/palmz72.c | 1
-rw-r--r--  arch/arm/mach-pxa/pm.c | 1
-rw-r--r--  arch/arm/mach-pxa/pxa25x.c | 3
-rw-r--r--  arch/arm/mach-pxa/pxa27x.c | 11
-rw-r--r--  arch/arm/mach-pxa/pxa3xx.c | 14
-rw-r--r--  arch/arm/mach-pxa/raumfeld.c | 36
-rw-r--r--  arch/arm/mach-pxa/sleep.S | 55
-rw-r--r--  arch/arm/mach-pxa/zeus.c | 3
-rw-r--r--  arch/arm/mach-realview/Kconfig | 1
-rw-r--r--  arch/arm/mach-realview/platsmp.c | 8
-rw-r--r--  arch/arm/mach-s3c2412/pm.c | 6
-rw-r--r--  arch/arm/mach-s3c2416/pm.c | 6
-rw-r--r--  arch/arm/mach-s3c2440/mach-mini2440.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/dev-spi.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/dma.c | 14
-rw-r--r--  arch/arm/mach-s3c64xx/pm.c | 2
-rw-r--r--  arch/arm/mach-s3c64xx/sleep.S | 23
-rw-r--r--  arch/arm/mach-s5p64x0/dev-spi.c | 4
-rw-r--r--  arch/arm/mach-s5pc100/dev-spi.c | 4
-rw-r--r--  arch/arm/mach-s5pv210/dev-spi.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/pm.c | 2
-rw-r--r--  arch/arm/mach-s5pv210/sleep.S | 21
-rw-r--r--  arch/arm/mach-sa1100/pm.c | 7
-rw-r--r--  arch/arm/mach-sa1100/sleep.S | 19
-rw-r--r--  arch/arm/mach-shark/include/mach/entry-macro.S | 10
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h | 21
-rw-r--r--  arch/arm/mach-shmobile/include/mach/sdhi.h | 16
-rw-r--r--  arch/arm/mach-shmobile/platsmp.c | 5
-rw-r--r--  arch/arm/mach-tegra/platsmp.c | 8
-rw-r--r--  arch/arm/mach-ux500/platsmp.c | 8
-rw-r--r--  arch/arm/mach-vexpress/ct-ca9x4.c | 4
-rw-r--r--  arch/arm/mach-vt8500/irq.c | 21
-rw-r--r--  arch/arm/mm/abort-ev4.S | 17
-rw-r--r--  arch/arm/mm/abort-ev4t.S | 17
-rw-r--r--  arch/arm/mm/abort-ev5t.S | 19
-rw-r--r--  arch/arm/mm/abort-ev5tj.S | 25
-rw-r--r--  arch/arm/mm/abort-ev6.S | 25
-rw-r--r--  arch/arm/mm/abort-ev7.S | 25
-rw-r--r--  arch/arm/mm/abort-lv4t.S | 141
-rw-r--r--  arch/arm/mm/abort-macro.S | 34
-rw-r--r--  arch/arm/mm/abort-nommu.S | 10
-rw-r--r--  arch/arm/mm/alignment.c | 3
-rw-r--r--  arch/arm/mm/cache-l2x0.c | 19
-rw-r--r--  arch/arm/mm/copypage-v6.c | 1
-rw-r--r--  arch/arm/mm/dma-mapping.c | 35
-rw-r--r--  arch/arm/mm/fault.c | 6
-rw-r--r--  arch/arm/mm/init.c | 31
-rw-r--r--  arch/arm/mm/mm.h | 6
-rw-r--r--  arch/arm/mm/mmu.c | 5
-rw-r--r--  arch/arm/mm/nommu.c | 4
-rw-r--r--  arch/arm/mm/pabort-legacy.S | 10
-rw-r--r--  arch/arm/mm/pabort-v6.S | 10
-rw-r--r--  arch/arm/mm/pabort-v7.S | 11
-rw-r--r--  arch/arm/mm/proc-arm6_7.S | 90
-rw-r--r--  arch/arm/mm/proc-sa1100.S | 4
-rw-r--r--  arch/arm/mm/tlb-fa.S | 4
-rw-r--r--  arch/arm/mm/tlb-v6.S | 4
-rw-r--r--  arch/arm/mm/tlb-v7.S | 6
-rw-r--r--  arch/arm/plat-mxc/include/mach/entry-macro.S | 4
-rw-r--r--  arch/arm/plat-omap/sram.c | 15
-rw-r--r--  arch/arm/plat-orion/gpio.c | 2
-rw-r--r--  arch/arm/plat-pxa/gpio.c | 10
-rw-r--r--  arch/arm/plat-s3c24xx/dma.c | 24
-rw-r--r--  arch/arm/plat-s3c24xx/sleep.S | 25
-rw-r--r--  arch/arm/plat-s5p/irq-gpioint.c | 2
-rw-r--r--  arch/arm/plat-s5p/s5p-time.c | 4
-rw-r--r--  arch/arm/plat-samsung/dma.c | 6
-rw-r--r--  arch/arm/plat-samsung/include/plat/devs.h | 6
-rw-r--r--  arch/arm/plat-samsung/include/plat/dma.h | 21
-rw-r--r--  arch/arm/plat-samsung/include/plat/pm.h | 5
-rw-r--r--  arch/arm/plat-samsung/include/plat/s3c64xx-spi.h | 2
-rw-r--r--  arch/arm/plat-samsung/irq-uart.c | 9
-rw-r--r--  arch/arm/plat-samsung/irq-vic-timer.c | 7
-rw-r--r--  arch/arm/plat-samsung/pm.c | 11
-rw-r--r--  arch/mips/kernel/i8259.c | 22
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 30
-rw-r--r--  arch/sparc/include/asm/irqflags_32.h | 8
-rw-r--r--  arch/sparc/include/asm/irqflags_64.h | 14
-rw-r--r--  arch/sparc/kernel/entry.S | 2
-rw-r--r--  arch/sparc/mm/leon_mm.c | 2
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/mmzone_32.h | 2
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.S | 14
-rw-r--r--  arch/x86/kernel/acpi/realmode/wakeup.h | 6
-rw-r--r--  arch/x86/kernel/acpi/sleep.c | 6
-rw-r--r--  arch/x86/kernel/reboot.c | 16
-rw-r--r--  arch/x86/mm/init_64.c | 3
-rw-r--r--  arch/x86/oprofile/nmi_int.c | 14
-rw-r--r--  arch/x86/pci/xen.c | 56
-rw-r--r--  arch/x86/platform/efi/efi.c | 3
165 files changed, 2226 insertions, 1863 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 9adc278a22ab..84fda2bebd7a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -37,6 +37,9 @@ config ARM
 	  Europe.  There is an ARM Linux project with a web page at
 	  <http://www.arm.linux.org.uk/>.
 
+config ARM_HAS_SG_CHAIN
+	bool
+
 config HAVE_PWM
 	bool
 
@@ -1346,7 +1349,6 @@ config SMP_ON_UP
 
 config HAVE_ARM_SCU
 	bool
-	depends on SMP
 	help
 	  This option enables support for the ARM system coherency unit
 
@@ -1715,17 +1717,34 @@ config ZBOOT_ROM
 	  Say Y here if you intend to execute your compressed kernel image
 	  (zImage) directly from ROM or flash.  If unsure, say N.
 
+choice
+	prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
+	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+	default ZBOOT_ROM_NONE
+	help
+	  Include experimental SD/MMC loading code in the ROM-able zImage.
+	  With this enabled it is possible to write the the ROM-able zImage
+	  kernel image to an MMC or SD card and boot the kernel straight
+	  from the reset vector. At reset the processor Mask ROM will load
+	  the first part of the the ROM-able zImage which in turn loads the
+	  rest the kernel image to RAM.
+
+config ZBOOT_ROM_NONE
+	bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
+	help
+	  Do not load image from SD or MMC
+
 config ZBOOT_ROM_MMCIF
 	bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
-	depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
 	help
-	  Say Y here to include experimental MMCIF loading code in the
-	  ROM-able zImage. With this enabled it is possible to write the
-	  the ROM-able zImage kernel image to an MMC card and boot the
-	  kernel straight from the reset vector. At reset the processor
-	  Mask ROM will load the first part of the the ROM-able zImage
-	  which in turn loads the rest the kernel image to RAM using the
-	  MMCIF hardware block.
+	  Load image from MMCIF hardware block.
+
+config ZBOOT_ROM_SH_MOBILE_SDHI
+	bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
+	help
+	  Load image from SDHI hardware block
+
+endchoice
 
 config CMDLINE
 	string "Default kernel command string"
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 23aad0722303..0c74a6fab952 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -6,13 +6,19 @@
 
 OBJS		=
 
-# Ensure that mmcif loader code appears early in the image
+# Ensure that MMCIF loader code appears early in the image
 # to minimise that number of bocks that have to be read in
 # order to load it.
 ifeq ($(CONFIG_ZBOOT_ROM_MMCIF),y)
-ifeq ($(CONFIG_ARCH_SH7372),y)
 OBJS	+= mmcif-sh7372.o
 endif
+
+# Ensure that SDHI loader code appears early in the image
+# to minimise that number of bocks that have to be read in
+# order to load it.
+ifeq ($(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI),y)
+OBJS	+= sdhi-shmobile.o
+OBJS	+= sdhi-sh7372.o
 endif
 
 AFLAGS_head.o += -DTEXT_OFFSET=$(TEXT_OFFSET)
diff --git a/arch/arm/boot/compressed/head-shmobile.S b/arch/arm/boot/compressed/head-shmobile.S
index c943d2e7da9d..fe3719b516fd 100644
--- a/arch/arm/boot/compressed/head-shmobile.S
+++ b/arch/arm/boot/compressed/head-shmobile.S
@@ -25,14 +25,14 @@
 	/* load board-specific initialization code */
 #include <mach/zboot.h>
 
-#ifdef CONFIG_ZBOOT_ROM_MMCIF
-	/* Load image from MMC */
-	adr	sp, __tmp_stack + 128
+#if defined(CONFIG_ZBOOT_ROM_MMCIF) || defined(CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI)
+	/* Load image from MMC/SD */
+	adr	sp, __tmp_stack + 256
 	ldr	r0, __image_start
 	ldr	r1, __image_end
 	subs	r1, r1, r0
 	ldr	r0, __load_base
-	bl	mmcif_loader
+	bl	mmc_loader
 
 	/* Jump to loaded code */
 	ldr	r0, __loaded
@@ -51,9 +51,9 @@ __loaded:
 	.long	__continue
 	.align
 __tmp_stack:
-	.space	128
+	.space	256
 __continue:
-#endif /* CONFIG_ZBOOT_ROM_MMCIF */
+#endif /* CONFIG_ZBOOT_ROM_MMC || CONFIG_ZBOOT_ROM_SH_MOBILE_SDHI */
 
 	b	1f
 __atags:@ tag #1
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 940b20178107..e95a5989602a 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -353,7 +353,8 @@ not_relocated: mov r0, #0
 		mov	r0, #0			@ must be zero
 		mov	r1, r7			@ restore architecture number
 		mov	r2, r8			@ restore atags pointer
-		mov	pc, r4			@ call kernel
+ ARM(		mov	pc, r4	)		@ call kernel
+ THUMB(		bx	r4	)		@ entry point is always ARM
 
 		.align	2
 		.type	LC0, #object
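
Annotation: the ARM()/THUMB() assembler macros used in the hunk above come from arch/arm/include/asm/unified.h and expand to their argument only in the matching build; roughly (a sketch of the mechanism, not the verbatim header):

/* Sketch: how ARM()/THUMB() select code per build, per asm/unified.h. */
#ifdef CONFIG_THUMB2_KERNEL
#define ARM(x...)		/* dropped in Thumb-2 builds */
#define THUMB(x...)	x	/* kept in Thumb-2 builds */
#else
#define ARM(x...)	x	/* kept in ARM builds */
#define THUMB(x...)		/* dropped in ARM builds */
#endif

The bx is the point of the change: a Thumb-2 decompressor must use bx (which switches state on bit 0 of r4) to enter the kernel, because the kernel entry point is always ARM state and a plain mov pc would stay in Thumb state.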
diff --git a/arch/arm/boot/compressed/mmcif-sh7372.c b/arch/arm/boot/compressed/mmcif-sh7372.c
index 7453c8337b83..b6f61d9a5a1b 100644
--- a/arch/arm/boot/compressed/mmcif-sh7372.c
+++ b/arch/arm/boot/compressed/mmcif-sh7372.c
@@ -40,7 +40,7 @@
  * to an MMC card
  * # dd if=vrl4.out of=/dev/sdx bs=512 seek=1
  */
-asmlinkage void mmcif_loader(unsigned char *buf, unsigned long len)
+asmlinkage void mmc_loader(unsigned char *buf, unsigned long len)
 {
 	mmc_init_progress();
 	mmc_update_progress(MMC_PROGRESS_ENTER);
diff --git a/arch/arm/boot/compressed/sdhi-sh7372.c b/arch/arm/boot/compressed/sdhi-sh7372.c
new file mode 100644
index 000000000000..d403a8b24d7f
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-sh7372.c
@@ -0,0 +1,95 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <mach/mmc.h>
+#include <linux/mmc/boot.h>
+#include <linux/mmc/tmio.h>
+
+#include "sdhi-shmobile.h"
+
+#define PORT179CR       0xe60520b3
+#define PORT180CR       0xe60520b4
+#define PORT181CR       0xe60520b5
+#define PORT182CR       0xe60520b6
+#define PORT183CR       0xe60520b7
+#define PORT184CR       0xe60520b8
+
+#define SMSTPCR3        0xe615013c
+
+#define CR_INPUT_ENABLE 0x10
+#define CR_FUNCTION1    0x01
+
+#define SDHI1_BASE	(void __iomem *)0xe6860000
+#define SDHI_BASE	SDHI1_BASE
+
+/*  SuperH Mobile SDHI loader
+ *
+ * loads the zImage from an SD card starting from block 0
+ * on physical partition 1
+ *
+ * The image must be start with a vrl4 header and
+ * the zImage must start at offset 512 of the image. That is,
+ * at block 1 (=byte 512) of physical partition 1
+ *
+ * Use the following line to write the vrl4 formated zImage
+ * to an SD card
+ * # dd if=vrl4.out of=/dev/sdx bs=512
+ */
+asmlinkage void mmc_loader(unsigned short *buf, unsigned long len)
+{
+	int high_capacity;
+
+	mmc_init_progress();
+
+	mmc_update_progress(MMC_PROGRESS_ENTER);
+	/* Initialise SDHI1 */
+	/* PORT184CR: GPIO_FN_SDHICMD1 Control */
+	__raw_writeb(CR_FUNCTION1, PORT184CR);
+	/* PORT179CR: GPIO_FN_SDHICLK1 Control */
+	__raw_writeb(CR_INPUT_ENABLE|CR_FUNCTION1, PORT179CR);
+	/* PORT181CR: GPIO_FN_SDHID1_3 Control */
+	__raw_writeb(CR_FUNCTION1, PORT183CR);
+	/* PORT182CR: GPIO_FN_SDHID1_2 Control */
+	__raw_writeb(CR_FUNCTION1, PORT182CR);
+	/* PORT183CR: GPIO_FN_SDHID1_1 Control */
+	__raw_writeb(CR_FUNCTION1, PORT181CR);
+	/* PORT180CR: GPIO_FN_SDHID1_0 Control */
+	__raw_writeb(CR_FUNCTION1, PORT180CR);
+
+	/* Enable clock to SDHI1 hardware block */
+	__raw_writel(__raw_readl(SMSTPCR3) & ~(1 << 13), SMSTPCR3);
+
+	/* setup SDHI hardware */
+	mmc_update_progress(MMC_PROGRESS_INIT);
+	high_capacity = sdhi_boot_init(SDHI_BASE);
+	if (high_capacity < 0)
+		goto err;
+
+	mmc_update_progress(MMC_PROGRESS_LOAD);
+	/* load kernel */
+	if (sdhi_boot_do_read(SDHI_BASE, high_capacity,
+			      0, /* Kernel is at block 1 */
+			      (len + TMIO_BBS - 1) / TMIO_BBS, buf))
+		goto err;
+
+	/* Disable clock to SDHI1 hardware block */
+	__raw_writel(__raw_readl(SMSTPCR3) & (1 << 13), SMSTPCR3);
+
+	mmc_update_progress(MMC_PROGRESS_DONE);
+
+	return;
+err:
+	for(;;);
+}
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.c b/arch/arm/boot/compressed/sdhi-shmobile.c
new file mode 100644
index 000000000000..bd3d46980955
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-shmobile.c
@@ -0,0 +1,449 @@
+/*
+ * SuperH Mobile SDHI
+ *
+ * Copyright (C) 2010 Magnus Damm
+ * Copyright (C) 2010 Kuninori Morimoto
+ * Copyright (C) 2010 Simon Horman
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Parts inspired by u-boot
+ */
+
+#include <linux/io.h>
+#include <linux/mmc/host.h>
+#include <linux/mmc/core.h>
+#include <linux/mmc/mmc.h>
+#include <linux/mmc/sd.h>
+#include <linux/mmc/tmio.h>
+#include <mach/sdhi.h>
+
+#define OCR_FASTBOOT		(1<<29)
+#define OCR_HCS			(1<<30)
+#define OCR_BUSY		(1<<31)
+
+#define RESP_CMD12		0x00000030
+
+static inline u16 sd_ctrl_read16(void __iomem *base, int addr)
+{
+	return __raw_readw(base + addr);
+}
+
+static inline u32 sd_ctrl_read32(void __iomem *base, int addr)
+{
+	return __raw_readw(base + addr) |
+	       __raw_readw(base + addr + 2) << 16;
+}
+
+static inline void sd_ctrl_write16(void __iomem *base, int addr, u16 val)
+{
+	__raw_writew(val, base + addr);
+}
+
+static inline void sd_ctrl_write32(void __iomem *base, int addr, u32 val)
+{
+	__raw_writew(val, base + addr);
+	__raw_writew(val >> 16, base + addr + 2);
+}
+
+#define ALL_ERROR (TMIO_STAT_CMD_IDX_ERR | TMIO_STAT_CRCFAIL |		\
+		   TMIO_STAT_STOPBIT_ERR | TMIO_STAT_DATATIMEOUT |	\
+		   TMIO_STAT_RXOVERFLOW | TMIO_STAT_TXUNDERRUN |	\
+		   TMIO_STAT_CMDTIMEOUT | TMIO_STAT_ILL_ACCESS |	\
+		   TMIO_STAT_ILL_FUNC)
+
+static int sdhi_intr(void __iomem *base)
+{
+	unsigned long state = sd_ctrl_read32(base, CTL_STATUS);
+
+	if (state & ALL_ERROR) {
+		sd_ctrl_write32(base, CTL_STATUS, ~ALL_ERROR);
+		sd_ctrl_write32(base, CTL_IRQ_MASK,
+				ALL_ERROR |
+				sd_ctrl_read32(base, CTL_IRQ_MASK));
+		return -EINVAL;
+	}
+	if (state & TMIO_STAT_CMDRESPEND) {
+		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+		sd_ctrl_write32(base, CTL_IRQ_MASK,
+				TMIO_STAT_CMDRESPEND |
+				sd_ctrl_read32(base, CTL_IRQ_MASK));
+		return 0;
+	}
+	if (state & TMIO_STAT_RXRDY) {
+		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_RXRDY);
+		sd_ctrl_write32(base, CTL_IRQ_MASK,
+				TMIO_STAT_RXRDY | TMIO_STAT_TXUNDERRUN |
+				sd_ctrl_read32(base, CTL_IRQ_MASK));
+		return 0;
+	}
+	if (state & TMIO_STAT_DATAEND) {
+		sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_DATAEND);
+		sd_ctrl_write32(base, CTL_IRQ_MASK,
+				TMIO_STAT_DATAEND |
+				sd_ctrl_read32(base, CTL_IRQ_MASK));
+		return 0;
+	}
+
+	return -EAGAIN;
+}
+
+static int sdhi_boot_wait_resp_end(void __iomem *base)
+{
+	int err = -EAGAIN, timeout = 10000000;
+
+	while (timeout--) {
+		err = sdhi_intr(base);
+		if (err != -EAGAIN)
+			break;
+		udelay(1);
+	}
+
+	return err;
+}
+
+/* SDHI_CLK_CTRL */
+#define CLK_MMC_ENABLE		(1 << 8)
+#define CLK_MMC_INIT		(1 << 6)	/* clk / 256 */
+
+static void sdhi_boot_mmc_clk_stop(void __iomem *base)
+{
+	sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, 0x0000);
+	msleep(10);
+	sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, ~CLK_MMC_ENABLE &
+		sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+	msleep(10);
+}
+
+static void sdhi_boot_mmc_clk_start(void __iomem *base)
+{
+	sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL, CLK_MMC_ENABLE |
+		sd_ctrl_read16(base, CTL_SD_CARD_CLK_CTL));
+	msleep(10);
+	sd_ctrl_write16(base, CTL_CLK_AND_WAIT_CTL, CLK_MMC_ENABLE);
+	msleep(10);
+}
+
+static void sdhi_boot_reset(void __iomem *base)
+{
+	sd_ctrl_write16(base, CTL_RESET_SD, 0x0000);
+	msleep(10);
+	sd_ctrl_write16(base, CTL_RESET_SD, 0x0001);
+	msleep(10);
+}
+
+/* Set MMC clock / power.
+ * Note: This controller uses a simple divider scheme therefore it cannot
+ * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as
+ * MMC wont run that fast, it has to be clocked at 12MHz which is the next
+ * slowest setting.
+ */
+static int sdhi_boot_mmc_set_ios(void __iomem *base, struct mmc_ios *ios)
+{
+	if (sd_ctrl_read32(base, CTL_STATUS) & TMIO_STAT_CMD_BUSY)
+		return -EBUSY;
+
+	if (ios->clock)
+		sd_ctrl_write16(base, CTL_SD_CARD_CLK_CTL,
+				ios->clock | CLK_MMC_ENABLE);
+
+	/* Power sequence - OFF -> ON -> UP */
+	switch (ios->power_mode) {
+	case MMC_POWER_OFF: /* power down SD bus */
+		sdhi_boot_mmc_clk_stop(base);
+		break;
+	case MMC_POWER_ON: /* power up SD bus */
+		break;
+	case MMC_POWER_UP: /* start bus clock */
+		sdhi_boot_mmc_clk_start(base);
+		break;
+	}
+
+	switch (ios->bus_width) {
+	case MMC_BUS_WIDTH_1:
+		sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x80e0);
+		break;
+	case MMC_BUS_WIDTH_4:
+		sd_ctrl_write16(base, CTL_SD_MEM_CARD_OPT, 0x00e0);
+		break;
+	}
+
+	/* Let things settle. delay taken from winCE driver */
+	udelay(140);
+
+	return 0;
+}
+
+/* These are the bitmasks the tmio chip requires to implement the MMC response
+ * types. Note that R1 and R6 are the same in this scheme. */
+#define RESP_NONE      0x0300
+#define RESP_R1        0x0400
+#define RESP_R1B       0x0500
+#define RESP_R2        0x0600
+#define RESP_R3        0x0700
+#define DATA_PRESENT   0x0800
+#define TRANSFER_READ  0x1000
+
+static int sdhi_boot_request(void __iomem *base, struct mmc_command *cmd)
+{
+	int err, c = cmd->opcode;
+
+	switch (mmc_resp_type(cmd)) {
+	case MMC_RSP_NONE: c |= RESP_NONE; break;
+	case MMC_RSP_R1:   c |= RESP_R1;   break;
+	case MMC_RSP_R1B:  c |= RESP_R1B;  break;
+	case MMC_RSP_R2:   c |= RESP_R2;   break;
+	case MMC_RSP_R3:   c |= RESP_R3;   break;
+	default:
+		return -EINVAL;
+	}
+
+	/* No interrupts so this may not be cleared */
+	sd_ctrl_write32(base, CTL_STATUS, ~TMIO_STAT_CMDRESPEND);
+
+	sd_ctrl_write32(base, CTL_IRQ_MASK, TMIO_STAT_CMDRESPEND |
+			sd_ctrl_read32(base, CTL_IRQ_MASK));
+	sd_ctrl_write32(base, CTL_ARG_REG, cmd->arg);
+	sd_ctrl_write16(base, CTL_SD_CMD, c);
+
+
+	sd_ctrl_write32(base, CTL_IRQ_MASK,
+			~(TMIO_STAT_CMDRESPEND | ALL_ERROR) &
+			sd_ctrl_read32(base, CTL_IRQ_MASK));
+
+	err = sdhi_boot_wait_resp_end(base);
+	if (err)
+		return err;
+
+	cmd->resp[0] = sd_ctrl_read32(base, CTL_RESPONSE);
+
+	return 0;
+}
+
+static int sdhi_boot_do_read_single(void __iomem *base, int high_capacity,
+				    unsigned long block, unsigned short *buf)
+{
+	int err, i;
+
+	/* CMD17 - Read */
+	{
+		struct mmc_command cmd;
+
+		cmd.opcode = MMC_READ_SINGLE_BLOCK | \
+			     TRANSFER_READ | DATA_PRESENT;
+		if (high_capacity)
+			cmd.arg = block;
+		else
+			cmd.arg = block * TMIO_BBS;
+		cmd.flags = MMC_RSP_R1;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+	}
+
+	sd_ctrl_write32(base, CTL_IRQ_MASK,
+			~(TMIO_STAT_DATAEND | TMIO_STAT_RXRDY |
+			  TMIO_STAT_TXUNDERRUN) &
+			sd_ctrl_read32(base, CTL_IRQ_MASK));
+	err = sdhi_boot_wait_resp_end(base);
+	if (err)
+		return err;
+
+	sd_ctrl_write16(base, CTL_SD_XFER_LEN, TMIO_BBS);
+	for (i = 0; i < TMIO_BBS / sizeof(*buf); i++)
+		*buf++ = sd_ctrl_read16(base, RESP_CMD12);
+
+	err = sdhi_boot_wait_resp_end(base);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+		      unsigned long offset, unsigned short count,
+		      unsigned short *buf)
+{
+	unsigned long i;
+	int err = 0;
+
+	for (i = 0; i < count; i++) {
+		err = sdhi_boot_do_read_single(base, high_capacity, offset + i,
+					       buf + (i * TMIO_BBS /
+						      sizeof(*buf)));
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+#define VOLTAGES (MMC_VDD_32_33 | MMC_VDD_33_34)
+
+int sdhi_boot_init(void __iomem *base)
+{
+	bool sd_v2 = false, sd_v1_0 = false;
+	unsigned short cid;
+	int err, high_capacity = 0;
+
+	sdhi_boot_mmc_clk_stop(base);
+	sdhi_boot_reset(base);
+
+	/* mmc0: clock 400000Hz busmode 1 powermode 2 cs 0 Vdd 21 width 0 timing 0 */
+	{
+		struct mmc_ios ios;
+		ios.power_mode = MMC_POWER_ON;
+		ios.bus_width = MMC_BUS_WIDTH_1;
+		ios.clock = CLK_MMC_INIT;
+		err = sdhi_boot_mmc_set_ios(base, &ios);
+		if (err)
+			return err;
+	}
+
+	/* CMD0 */
+	{
+		struct mmc_command cmd;
+		msleep(1);
+		cmd.opcode = MMC_GO_IDLE_STATE;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_NONE;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+		msleep(2);
+	}
+
+	/* CMD8 - Test for SD version 2 */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = SD_SEND_IF_COND;
+		cmd.arg = (VOLTAGES != 0) << 8 | 0xaa;
+		cmd.flags = MMC_RSP_R1;
+		err = sdhi_boot_request(base, &cmd); /* Ignore error */
+		if ((cmd.resp[0] & 0xff) == 0xaa)
+			sd_v2 = true;
+	}
+
+	/* CMD55 - Get OCR (SD) */
+	{
+		int timeout = 1000;
+		struct mmc_command cmd;
+
+		cmd.arg = 0;
+
+		do {
+			cmd.opcode = MMC_APP_CMD;
+			cmd.flags = MMC_RSP_R1;
+			cmd.arg = 0;
+			err = sdhi_boot_request(base, &cmd);
+			if (err)
+				break;
+
+			cmd.opcode = SD_APP_OP_COND;
+			cmd.flags = MMC_RSP_R3;
+			cmd.arg = (VOLTAGES & 0xff8000);
+			if (sd_v2)
+				cmd.arg |= OCR_HCS;
+			cmd.arg |= OCR_FASTBOOT;
+			err = sdhi_boot_request(base, &cmd);
+			if (err)
+				break;
+
+			msleep(1);
+		} while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
+
+		if (!err && timeout) {
+			if (!sd_v2)
+				sd_v1_0 = true;
+			high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+		}
+	}
+
+	/* CMD1 - Get OCR (MMC) */
+	if (!sd_v2 && !sd_v1_0) {
+		int timeout = 1000;
+		struct mmc_command cmd;
+
+		do {
+			cmd.opcode = MMC_SEND_OP_COND;
+			cmd.arg = VOLTAGES | OCR_HCS;
+			cmd.flags = MMC_RSP_R3;
+			err = sdhi_boot_request(base, &cmd);
+			if (err)
+				return err;
+
+			msleep(1);
+		} while((!(cmd.resp[0] & OCR_BUSY)) && --timeout);
+
+		if (!timeout)
+			return -EAGAIN;
+
+		high_capacity = (cmd.resp[0] & OCR_HCS) == OCR_HCS;
+	}
+
+	/* CMD2 - Get CID */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = MMC_ALL_SEND_CID;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_R2;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+	}
+
+	/* CMD3
+	 * MMC: Set the relative address
+	 * SD:  Get the relative address
+	 * Also puts the card into the standby state
+	 */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = MMC_SET_RELATIVE_ADDR;
+		cmd.arg = 0;
+		cmd.flags = MMC_RSP_R1;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+		cid = cmd.resp[0] >> 16;
+	}
+
+	/* CMD9 - Get CSD */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = MMC_SEND_CSD;
+		cmd.arg = cid << 16;
+		cmd.flags = MMC_RSP_R2;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+	}
+
+	/* CMD7 - Select the card */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = MMC_SELECT_CARD;
+		//cmd.arg = rca << 16;
+		cmd.arg = cid << 16;
+		//cmd.flags = MMC_RSP_R1B;
+		cmd.flags = MMC_RSP_R1;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+	}
+
+	/* CMD16 - Set the block size */
+	{
+		struct mmc_command cmd;
+		cmd.opcode = MMC_SET_BLOCKLEN;
+		cmd.arg = TMIO_BBS;
+		cmd.flags = MMC_RSP_R1;
+		err = sdhi_boot_request(base, &cmd);
+		if (err)
+			return err;
+	}
+
+	return high_capacity;
+}
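
Annotation: taken together, sdhi_boot_init() brings the card to the transfer state and reports whether it is high-capacity (block-addressed), and sdhi_boot_do_read() then pulls whole TMIO_BBS-byte blocks. A minimal caller would look like the sketch below (assumptions: same headers as above, a mapped controller at `base`; this is essentially what mmc_loader() in sdhi-sh7372.c does):

/* Sketch: load `nbytes` starting at card block 0 into `dst`. */
static int load_from_sd(void __iomem *base, unsigned short *dst,
			unsigned long nbytes)
{
	int high_capacity = sdhi_boot_init(base);	/* < 0 on error */

	if (high_capacity < 0)
		return high_capacity;

	/* Round the byte count up to whole TMIO_BBS-sized blocks. */
	return sdhi_boot_do_read(base, high_capacity, 0,
				 (nbytes + TMIO_BBS - 1) / TMIO_BBS, dst);
}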
diff --git a/arch/arm/boot/compressed/sdhi-shmobile.h b/arch/arm/boot/compressed/sdhi-shmobile.h
new file mode 100644
index 000000000000..92eaa09f985e
--- /dev/null
+++ b/arch/arm/boot/compressed/sdhi-shmobile.h
@@ -0,0 +1,11 @@
+#ifndef SDHI_MOBILE_H
+#define SDHI_MOBILE_H
+
+#include <linux/compiler.h>
+
+int sdhi_boot_do_read(void __iomem *base, int high_capacity,
+		      unsigned long offset, unsigned short count,
+		      unsigned short *buf);
+int sdhi_boot_init(void __iomem *base);
+
+#endif
diff --git a/arch/arm/boot/compressed/vmlinux.lds.in b/arch/arm/boot/compressed/vmlinux.lds.in
index ea80abe78844..4e728834a1b9 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.in
+++ b/arch/arm/boot/compressed/vmlinux.lds.in
@@ -33,20 +33,24 @@ SECTIONS
     *(.text.*)
     *(.fixup)
     *(.gnu.warning)
+    *(.glue_7t)
+    *(.glue_7)
+  }
+  .rodata : {
     *(.rodata)
     *(.rodata.*)
-    *(.glue_7)
-    *(.glue_7t)
+  }
+  .piggydata : {
     *(.piggydata)
-    . = ALIGN(4);
   }
 
+  . = ALIGN(4);
   _etext = .;
 
+  .got.plt		: { *(.got.plt) }
   _got_start = .;
   .got			: { *(.got) }
   _got_end = .;
-  .got.plt		: { *(.got.plt) }
   _edata = .;
 
   . = BSS_START;
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index e5681636626f..595ecd290ebf 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -79,6 +79,8 @@ struct dmabounce_device_info {
 	struct dmabounce_pool	large;
 
 	rwlock_t lock;
+
+	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
 };
 
 #ifdef STATS
@@ -210,114 +212,91 @@ static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
 	if (!dev || !dev->archdata.dmabounce)
 		return NULL;
 	if (dma_mapping_error(dev, dma_addr)) {
-		if (dev)
-			dev_err(dev, "Trying to %s invalid mapping\n", where);
-		else
-			pr_err("unknown device: Trying to %s invalid mapping\n", where);
+		dev_err(dev, "Trying to %s invalid mapping\n", where);
 		return NULL;
 	}
 	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
 }
 
-static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
+static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
-	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
-	dma_addr_t dma_addr;
-	int needs_bounce = 0;
-
-	if (device_info)
-		DO_STATS ( device_info->map_op_count++ );
-
-	dma_addr = virt_to_dma(dev, ptr);
+	if (!dev || !dev->archdata.dmabounce)
+		return 0;
 
 	if (dev->dma_mask) {
-		unsigned long mask = *dev->dma_mask;
-		unsigned long limit;
+		unsigned long limit, mask = *dev->dma_mask;
 
 		limit = (mask + 1) & ~mask;
 		if (limit && size > limit) {
 			dev_err(dev, "DMA mapping too big (requested %#x "
 				"mask %#Lx)\n", size, *dev->dma_mask);
-			return ~0;
+			return -E2BIG;
 		}
 
-		/*
-		 * Figure out if we need to bounce from the DMA mask.
-		 */
-		needs_bounce = (dma_addr | (dma_addr + size - 1)) & ~mask;
+		/* Figure out if we need to bounce from the DMA mask. */
+		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
+			return 1;
 	}
 
-	if (device_info && (needs_bounce || dma_needs_bounce(dev, dma_addr, size))) {
-		struct safe_buffer *buf;
+	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
+}
+
+static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
+		enum dma_data_direction dir)
+{
+	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
+	struct safe_buffer *buf;
+
+	if (device_info)
+		DO_STATS ( device_info->map_op_count++ );
 
-		buf = alloc_safe_buffer(device_info, ptr, size, dir);
-		if (buf == 0) {
-			dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
-				__func__, ptr);
-			return 0;
-		}
+	buf = alloc_safe_buffer(device_info, ptr, size, dir);
+	if (buf == NULL) {
+		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
+			__func__, ptr);
+		return ~0;
+	}
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		if ((dir == DMA_TO_DEVICE) ||
-		    (dir == DMA_BIDIRECTIONAL)) {
-			dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
-				__func__, ptr, buf->safe, size);
-			memcpy(buf->safe, ptr, size);
-		}
-		ptr = buf->safe;
-
-		dma_addr = buf->safe_dma_addr;
-	} else {
-		/*
-		 * We don't need to sync the DMA buffer since
-		 * it was allocated via the coherent allocators.
-		 */
-		__dma_single_cpu_to_dev(ptr, size, dir);
+	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
+			__func__, ptr, buf->safe, size);
+		memcpy(buf->safe, ptr, size);
 	}
 
-	return dma_addr;
+	return buf->safe_dma_addr;
 }
 
-static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
+static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
 		size_t size, enum dma_data_direction dir)
 {
-	struct safe_buffer *buf = find_safe_buffer_dev(dev, dma_addr, "unmap");
-
-	if (buf) {
-		BUG_ON(buf->size != size);
-		BUG_ON(buf->direction != dir);
+	BUG_ON(buf->size != size);
+	BUG_ON(buf->direction != dir);
 
-		dev_dbg(dev,
-			"%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
-			__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
-			buf->safe, buf->safe_dma_addr);
+	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
+		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
+		buf->safe, buf->safe_dma_addr);
 
-		DO_STATS(dev->archdata.dmabounce->bounce_count++);
+	DO_STATS(dev->archdata.dmabounce->bounce_count++);
 
-		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
-			void *ptr = buf->ptr;
+	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
+		void *ptr = buf->ptr;
 
-			dev_dbg(dev,
-				"%s: copy back safe %p to unsafe %p size %d\n",
-				__func__, buf->safe, ptr, size);
-			memcpy(ptr, buf->safe, size);
+		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
+			__func__, buf->safe, ptr, size);
+		memcpy(ptr, buf->safe, size);
 
-			/*
-			 * Since we may have written to a page cache page,
-			 * we need to ensure that the data will be coherent
-			 * with user mappings.
-			 */
-			__cpuc_flush_dcache_area(ptr, size);
-		}
-		free_safe_buffer(dev->archdata.dmabounce, buf);
-	} else {
-		__dma_single_dev_to_cpu(dma_to_virt(dev, dma_addr), size, dir);
+		/*
+		 * Since we may have written to a page cache page,
+		 * we need to ensure that the data will be coherent
+		 * with user mappings.
+		 */
+		__cpuc_flush_dcache_area(ptr, size);
 	}
+	free_safe_buffer(dev->archdata.dmabounce, buf);
 }
 
 /* ************************************************** */
@@ -328,45 +307,28 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
  * substitute the safe buffer for the unsafe one.
  * (basically move the buffer from an unsafe area to a safe one)
  */
-dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, ptr, size, dir);
-
-	BUG_ON(!valid_dma_direction(dir));
-
-	return map_single(dev, ptr, size, dir);
-}
-EXPORT_SYMBOL(__dma_map_single);
-
-/*
- * see if a mapped address was really a "safe" buffer and if so, copy
- * the data from the safe buffer back to the unsafe buffer and free up
- * the safe buffer.  (basically return things back to the way they
- * should be)
- */
-void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
-		enum dma_data_direction dir)
-{
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
-
-	unmap_single(dev, dma_addr, size, dir);
-}
-EXPORT_SYMBOL(__dma_unmap_single);
-
 dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, enum dma_data_direction dir)
 {
+	dma_addr_t dma_addr;
+	int ret;
+
 	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
 		__func__, page, offset, size, dir);
 
-	BUG_ON(!valid_dma_direction(dir));
+	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;
+
+	ret = needs_bounce(dev, dma_addr, size);
+	if (ret < 0)
+		return ~0;
+
+	if (ret == 0) {
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+		return dma_addr;
+	}
 
 	if (PageHighMem(page)) {
-		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages "
-			"is not supported\n");
+		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
 		return ~0;
 	}
 
@@ -383,10 +345,19 @@ EXPORT_SYMBOL(__dma_map_page);
 void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
 		enum dma_data_direction dir)
 {
-	dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
-		__func__, (void *) dma_addr, size, dir);
+	struct safe_buffer *buf;
+
+	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
+		__func__, dma_addr, size, dir);
+
+	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
+	if (!buf) {
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)),
+			dma_addr & ~PAGE_MASK, size, dir);
+		return;
+	}
 
-	unmap_single(dev, dma_addr, size, dir);
+	unmap_single(dev, buf, size, dir);
 }
 EXPORT_SYMBOL(__dma_unmap_page);
 
@@ -461,7 +432,8 @@ static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
 }
 
 int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
-		unsigned long large_buffer_size)
+		unsigned long large_buffer_size,
+		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
 {
 	struct dmabounce_device_info *device_info;
 	int ret;
@@ -497,6 +469,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
 	device_info->dev = dev;
 	INIT_LIST_HEAD(&device_info->safe_buffers);
 	rwlock_init(&device_info->lock);
+	device_info->needs_bounce = needs_bounce_fn;
 
 #ifdef STATS
 	device_info->total_allocs = 0;
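
Annotation: with the global dma_needs_bounce() hook gone, the bounce decision travels with the device: each platform passes its predicate to dmabounce_register_dev(), and it is only consulted after the generic DMA-mask check in needs_bounce(). A hypothetical platform that can only DMA below 32MB would now register like this (a sketch; foo_* names and the 32MB limit are illustrative, and the real conversions for it8152 and sa1111 follow below):

/* Sketch: per-device bounce predicate replacing the old global hook. */
static int foo_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	return (addr + size) > SZ_32M;	/* bounce anything above 32MB */
}

static int foo_platform_notify(struct device *dev)
{
	/* 512-byte small pool, 4K large pool, plus the new callback */
	return dmabounce_register_dev(dev, 512, 4096, foo_needs_bounce);
}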
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index 4ddd0a6ac7ff..7bdd91766d65 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -179,22 +179,21 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 {
 	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
 	unsigned int shift = (d->irq % 4) * 8;
-	unsigned int cpu = cpumask_first(mask_val);
+	unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask);
 	u32 val, mask, bit;
 
-	if (cpu >= 8)
+	if (cpu >= 8 || cpu >= nr_cpu_ids)
 		return -EINVAL;
 
 	mask = 0xff << shift;
 	bit = 1 << (cpu + shift);
 
 	spin_lock(&irq_controller_lock);
-	d->node = cpu;
 	val = readl_relaxed(reg) & ~mask;
 	writel_relaxed(val | bit, reg);
 	spin_unlock(&irq_controller_lock);
 
-	return 0;
+	return IRQ_SET_MASK_OK;
 }
 #endif
 
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 7a21927c52e1..14ad62e16dd1 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -243,6 +243,12 @@ static struct resource it8152_mem = {
  * ITE8152 chip can address up to 64MByte, so all the devices
  * connected to ITE8152 (PCI and USB) should have limited DMA window
  */
+static int it8152_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
+{
+	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
+		__func__, dma_addr, size);
+	return (dma_addr + size - PHYS_OFFSET) >= SZ_64M;
+}
 
 /*
  * Setup DMA mask to 64MB on devices connected to ITE8152. Ignore all
@@ -254,7 +260,7 @@ static int it8152_pci_platform_notify(struct device *dev)
 		if (dev->dma_mask)
 			*dev->dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
 		dev->coherent_dma_mask = (SZ_64M - 1) | PHYS_OFFSET;
-		dmabounce_register_dev(dev, 2048, 4096);
+		dmabounce_register_dev(dev, 2048, 4096, it8152_needs_bounce);
 	}
 	return 0;
 }
@@ -267,14 +273,6 @@ static int it8152_pci_platform_notify_remove(struct device *dev)
 	return 0;
 }
 
-int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
-	dev_dbg(dev, "%s: dma_addr %08x, size %08x\n",
-		__func__, dma_addr, size);
-	return (dev->bus == &pci_bus_type) &&
-		((dma_addr + size - PHYS_OFFSET) >= SZ_64M);
-}
-
 int dma_set_coherent_mask(struct device *dev, u64 mask)
 {
 	if (mask >= PHYS_OFFSET + SZ_64M - 1)
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 9c49a46a2b7a..0569de6acfba 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -579,7 +579,36 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
 
 	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
 }
+#endif
 
+#ifdef CONFIG_DMABOUNCE
+/*
+ * According to the "Intel StrongARM SA-1111 Microprocessor Companion
+ * Chip Specification Update" (June 2000), erratum #7, there is a
+ * significant bug in the SA1111 SDRAM shared memory controller.  If
+ * an access to a region of memory above 1MB relative to the bank base,
+ * it is important that address bit 10 _NOT_ be asserted. Depending
+ * on the configuration of the RAM, bit 10 may correspond to one
+ * of several different (processor-relative) address bits.
+ *
+ * This routine only identifies whether or not a given DMA address
+ * is susceptible to the bug.
+ *
+ * This should only get called for sa1111_device types due to the
+ * way we configure our device dma_masks.
+ */
+static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
+{
+	/*
+	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
+	 * User's Guide" mentions that jumpers R51 and R52 control the
+	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
+	 * SDRAM bank 1 on Neponset). The default configuration selects
+	 * Assabet, so any address in bank 1 is necessarily invalid.
+	 */
+	return (machine_is_assabet() || machine_is_pfs168()) &&
+		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000);
+}
 #endif
 
 static void sa1111_dev_release(struct device *_dev)
@@ -644,7 +673,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
 	dev->dev.dma_mask = &dev->dma_mask;
 
 	if (dev->dma_mask != 0xffffffffUL) {
-		ret = dmabounce_register_dev(&dev->dev, 1024, 4096);
+		ret = dmabounce_register_dev(&dev->dev, 1024, 4096,
+					sa1111_needs_bounce);
 		if (ret) {
 			dev_err(&dev->dev, "SA1111: Failed to register"
 				" with dmabounce\n");
@@ -818,34 +848,6 @@ static void __sa1111_remove(struct sa1111 *sachip)
 	kfree(sachip);
 }
 
-/*
- * According to the "Intel StrongARM SA-1111 Microprocessor Companion
- * Chip Specification Update" (June 2000), erratum #7, there is a
- * significant bug in the SA1111 SDRAM shared memory controller.  If
- * an access to a region of memory above 1MB relative to the bank base,
- * it is important that address bit 10 _NOT_ be asserted. Depending
- * on the configuration of the RAM, bit 10 may correspond to one
- * of several different (processor-relative) address bits.
- *
- * This routine only identifies whether or not a given DMA address
- * is susceptible to the bug.
- *
- * This should only get called for sa1111_device types due to the
- * way we configure our device dma_masks.
- */
-int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
-{
-	/*
-	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
-	 * User's Guide" mentions that jumpers R51 and R52 control the
-	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
-	 * SDRAM bank 1 on Neponset). The default configuration selects
-	 * Assabet, so any address in bank 1 is necessarily invalid.
-	 */
-	return ((machine_is_assabet() || machine_is_pfs168()) &&
-		(addr >= 0xc8000000 || (addr + size) >= 0xc8000000));
-}
-
 struct sa1111_save_data {
 	unsigned int	skcr;
 	unsigned int	skpcr;
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h
index b4892a06442c..f4280593dfa3 100644
--- a/arch/arm/include/asm/bitops.h
+++ b/arch/arm/include/asm/bitops.h
@@ -26,8 +26,8 @@
 #include <linux/compiler.h>
 #include <asm/system.h>
 
-#define smp_mb__before_clear_bit()	mb()
-#define smp_mb__after_clear_bit()	mb()
+#define smp_mb__before_clear_bit()	smp_mb()
+#define smp_mb__after_clear_bit()	smp_mb()
 
 /*
  * These functions are the basis of our bit ops.
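
Annotation: on ARM these barriers were unconditionally mb(); dropping to smp_mb() makes them no-ops on UP builds while keeping SMP ordering. The pattern they guard is the usual one (generic kernel idiom, not code from this diff; MY_BUSY is illustrative):

/* Sketch: the waker side of the clear_bit()/wait_on_bit() idiom that
 * smp_mb__after_clear_bit() exists to order. */
static void foo_unlock(unsigned long *flags)
{
	clear_bit(MY_BUSY, flags);
	smp_mb__after_clear_bit();	/* clear must be visible before wakeup */
	wake_up_bit(flags, MY_BUSY);
}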
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 4fff837363ed..7a21d0bf7134 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -115,39 +115,8 @@ static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	___dma_page_dev_to_cpu(page, off, size, dir);
 }
 
-/*
- * Return whether the given device DMA address mask can be supported
- * properly.  For example, if your device can only drive the low 24-bits
- * during bus mastering, then you would pass 0x00ffffff as the mask
- * to this function.
- *
- * FIXME: This should really be a platform specific issue - we should
- * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
- */
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	if (mask < ISA_DMA_THRESHOLD)
-		return 0;
-	return 1;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-#ifdef CONFIG_DMABOUNCE
-	if (dev->archdata.dmabounce) {
-		if (dma_mask >= ISA_DMA_THRESHOLD)
-			return 0;
-		else
-			return -EIO;
-	}
-#endif
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-
-	*dev->dma_mask = dma_mask;
-
-	return 0;
-}
+extern int dma_supported(struct device *, u64);
+extern int dma_set_mask(struct device *, u64);
 
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
@@ -256,14 +225,14 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
  * @dev: valid struct device pointer
  * @small_buf_size: size of buffers to use with small buffer pool
  * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
+ * @needs_bounce_fn: called to determine whether buffer needs bouncing
  *
  * This function should be called by low-level platform code to register
  * a device as requireing DMA buffer bouncing. The function will allocate
  * appropriate DMA pools for the device.
- *
  */
 extern int dmabounce_register_dev(struct device *, unsigned long,
-		unsigned long);
+		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
 
 /**
  * dmabounce_unregister_dev
@@ -277,31 +246,9 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
  */
 extern void dmabounce_unregister_dev(struct device *);
 
-/**
- * dma_needs_bounce
- *
- * @dev: valid struct device pointer
- * @dma_handle: dma_handle of unbounced buffer
- * @size: size of region being mapped
- *
- * Platforms that utilize the dmabounce mechanism must implement
- * this function.
- *
- * The dmabounce routines call this function whenever a dma-mapping
- * is requested to determine whether a given buffer needs to be bounced
- * or not. The function must return 0 if the buffer is OK for
- * DMA access and 1 if the buffer needs to be bounced.
- *
- */
-extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
-
 /*
  * The DMA API, implemented by dmabounce.c. See below for descriptions.
  */
-extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
-		enum dma_data_direction);
-extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
-		enum dma_data_direction);
 extern dma_addr_t __dma_map_page(struct device *, struct page *,
 		unsigned long, size_t, enum dma_data_direction);
 extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
@@ -328,13 +275,6 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
 }
 
 
-static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_cpu_to_dev(cpu_addr, size, dir);
-	return virt_to_dma(dev, cpu_addr);
-}
-
 static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction dir)
 {
@@ -342,12 +282,6 @@ static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
 	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
-static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir)
-{
-	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
-}
-
 static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 	size_t size, enum dma_data_direction dir)
 {
@@ -373,14 +307,18 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
 static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
 		size_t size, enum dma_data_direction dir)
 {
+	unsigned long offset;
+	struct page *page;
 	dma_addr_t addr;
 
+	BUG_ON(!virt_addr_valid(cpu_addr));
+	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
 	BUG_ON(!valid_dma_direction(dir));
 
-	addr = __dma_map_single(dev, cpu_addr, size, dir);
-	debug_dma_map_page(dev, virt_to_page(cpu_addr),
-			(unsigned long)cpu_addr & ~PAGE_MASK, size,
-			dir, addr, true);
+	page = virt_to_page(cpu_addr);
+	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
+	addr = __dma_map_page(dev, page, offset, size, dir);
+	debug_dma_map_page(dev, page, offset, size, dir, addr, true);
 
 	return addr;
 }
@@ -430,7 +368,7 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir)
 {
 	debug_dma_unmap_page(dev, handle, size, dir, true);
-	__dma_unmap_single(dev, handle, size, dir);
+	__dma_unmap_page(dev, handle, size, dir);
 }
 
 /**
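
Annotation: with __dma_map_single()/__dma_unmap_single() gone, dma_map_single() is now just a wrapper that resolves a lowmem virtual address to page + offset and funnels into __dma_map_page() — hence the new virt_addr_valid() BUG_ON()s. Driver-side usage is unchanged; for context, the standard DMA API pattern (dev, buf and len are illustrative):

/* Sketch: unchanged driver-side usage of the single-buffer mapping API. */
dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
if (dma_mapping_error(dev, handle))
	return -ENOMEM;
/* ... hardware performs the DMA ... */
dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);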
diff --git a/arch/arm/include/asm/entry-macro-multi.S b/arch/arm/include/asm/entry-macro-multi.S
index 2da8547de6d6..2f1e2098dfe7 100644
--- a/arch/arm/include/asm/entry-macro-multi.S
+++ b/arch/arm/include/asm/entry-macro-multi.S
@@ -4,8 +4,8 @@
  * Interrupt handling.  Preserves r7, r8, r9
  */
 	.macro	arch_irq_handler_default
-	get_irqnr_preamble r5, lr
-1:	get_irqnr_and_base r0, r6, r5, lr
+	get_irqnr_preamble r6, lr
+1:	get_irqnr_and_base r0, r2, r6, lr
 	movne	r1, sp
 	@
 	@ routine called with r0 = irq number, r1 = struct pt_regs *
@@ -17,17 +17,17 @@
 	/*
 	 * XXX
 	 *
-	 * this macro assumes that irqstat (r6) and base (r5) are
+	 * this macro assumes that irqstat (r2) and base (r6) are
 	 * preserved from get_irqnr_and_base above
 	 */
-	ALT_SMP(test_for_ipi r0, r6, r5, lr)
+	ALT_SMP(test_for_ipi r0, r2, r6, lr)
 	ALT_UP_B(9997f)
 	movne	r1, sp
 	adrne	lr, BSYM(1b)
 	bne	do_IPI
 
 #ifdef CONFIG_LOCAL_TIMERS
-	test_for_ltirq r0, r6, r5, lr
+	test_for_ltirq r0, r2, r6, lr
 	movne	r0, sp
 	adrne	lr, BSYM(1b)
 	bne	do_local_timer
@@ -40,7 +40,7 @@
 	.align	5
 	.global \symbol_name
 \symbol_name:
-	mov	r4, lr
+	mov	r8, lr
 	arch_irq_handler_default
-	mov	pc, r4
+	mov	pc, r8
 	.endm
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index af44a8fb3480..b8de516e600e 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -204,18 +204,6 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 
 /*
- * The DMA mask corresponding to the maximum bus address allocatable
- * using GFP_DMA.  The default here places no restriction on DMA
- * allocations.  This must be the smallest DMA mask in the system,
- * so a successful GFP_DMA allocation will always satisfy this.
- */
-#ifndef ARM_DMA_ZONE_SIZE
-#define ISA_DMA_THRESHOLD	(0xffffffffULL)
-#else
-#define ISA_DMA_THRESHOLD	(PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1)
-#endif
-
-/*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
  *
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index 7544ce6b481a..67c70a31a1be 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -52,7 +52,7 @@ reserve_pmu(enum arm_pmu_type device);
  * a cookie.
  */
 extern int
-release_pmu(struct platform_device *pdev);
+release_pmu(enum arm_pmu_type type);
 
 /**
  * init_pmu() - Initialise the PMU.
diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
index 8ec535e11fd7..633d1cb84d87 100644
--- a/arch/arm/include/asm/proc-fns.h
+++ b/arch/arm/include/asm/proc-fns.h
@@ -82,13 +82,13 @@ extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm);
 extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext);
 extern void cpu_reset(unsigned long addr) __attribute__((noreturn));
 #else
-#define cpu_proc_init()			processor._proc_init()
-#define cpu_proc_fin()			processor._proc_fin()
-#define cpu_reset(addr)			processor.reset(addr)
-#define cpu_do_idle()			processor._do_idle()
-#define cpu_dcache_clean_area(addr,sz)	processor.dcache_clean_area(addr,sz)
-#define cpu_set_pte_ext(ptep,pte,ext)	processor.set_pte_ext(ptep,pte,ext)
-#define cpu_do_switch_mm(pgd,mm)	processor.switch_mm(pgd,mm)
+#define cpu_proc_init			processor._proc_init
+#define cpu_proc_fin			processor._proc_fin
+#define cpu_reset			processor.reset
+#define cpu_do_idle			processor._do_idle
+#define cpu_dcache_clean_area		processor.dcache_clean_area
+#define cpu_set_pte_ext			processor.set_pte_ext
+#define cpu_do_switch_mm		processor.switch_mm
 #endif
 
 extern void cpu_resume(void);
diff --git a/arch/arm/include/asm/scatterlist.h b/arch/arm/include/asm/scatterlist.h
index 2f87870d9347..cefdb8f898a1 100644
--- a/arch/arm/include/asm/scatterlist.h
+++ b/arch/arm/include/asm/scatterlist.h
@@ -1,6 +1,10 @@
 #ifndef _ASMARM_SCATTERLIST_H
 #define _ASMARM_SCATTERLIST_H
 
+#ifdef CONFIG_ARM_HAS_SG_CHAIN
+#define ARCH_HAS_SG_CHAIN
+#endif
+
 #include <asm/memory.h>
 #include <asm/types.h>
 #include <asm-generic/scatterlist.h>
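
ARCH_HAS_SG_CHAIN is now opt-in per platform via CONFIG_ARM_HAS_SG_CHAIN rather than assumed. A hedged sketch of what the flag permits, using the generic scatterlist API; the table sizes are illustrative only:

    #include <linux/scatterlist.h>

    /* Link two scatterlist tables; only legal when the architecture
     * defines ARCH_HAS_SG_CHAIN.  'first' must have been allocated with
     * its final slot reserved for the chain entry. */
    static void example_chain(struct scatterlist *first, unsigned int nfirst,
                              struct scatterlist *second)
    {
            sg_chain(first, nfirst, second);
    }
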
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index ee2ad8ae07af..915696dd9c7c 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -187,12 +187,16 @@ struct tagtable {
 
 #define __tag __used __attribute__((__section__(".taglist.init")))
 #define __tagtable(tag, fn) \
-static struct tagtable __tagtable_##fn __tag = { tag, fn }
+static const struct tagtable __tagtable_##fn __tag = { tag, fn }
 
 /*
  * Memory map description
  */
-#define NR_BANKS 8
+#ifdef CONFIG_ARCH_EP93XX
+# define NR_BANKS 16
+#else
+# define NR_BANKS 8
+#endif
 
 struct membank {
 	phys_addr_t start;
diff --git a/arch/arm/include/asm/suspend.h b/arch/arm/include/asm/suspend.h
new file mode 100644
index 000000000000..b0e4e1a02318
--- /dev/null
+++ b/arch/arm/include/asm/suspend.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_ARM_SUSPEND_H
+#define __ASM_ARM_SUSPEND_H
+
+#include <asm/memory.h>
+#include <asm/tlbflush.h>
+
+extern void cpu_resume(void);
+
+/*
+ * Hide the first two arguments to __cpu_suspend - these are an implementation
+ * detail which platform code shouldn't have to know about.
+ */
+static inline int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
+{
+	extern int __cpu_suspend(int, long, unsigned long,
+				 int (*)(unsigned long));
+	int ret = __cpu_suspend(0, PHYS_OFFSET - PAGE_OFFSET, arg, fn);
+	flush_tlb_all();
+	return ret;
+}
+
+#endif
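
Usage sketch for the new wrapper (not part of the patch): platform PM code supplies a finisher that runs once __cpu_suspend has saved state; the function names below are hypothetical:

    #include <asm/suspend.h>
    #include <asm/proc-fns.h>

    /* Hypothetical finisher: called with CPU state already saved.  Entering
     * the low-power state here means cpu_suspend() returns only after
     * resume; merely returning instead unwinds via cpu_suspend_abort. */
    static int example_finisher(unsigned long arg)
    {
            cpu_do_idle();
            return 1;       /* tell the caller we never powered down */
    }

    static int example_enter_sleep(void)
    {
            return cpu_suspend(0, example_finisher);
    }
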
diff --git a/arch/arm/include/asm/tcm.h b/arch/arm/include/asm/tcm.h
index 5929ef5d927a..8578d726ad78 100644
--- a/arch/arm/include/asm/tcm.h
+++ b/arch/arm/include/asm/tcm.h
@@ -27,5 +27,7 @@
 
 void *tcm_alloc(size_t len);
 void tcm_free(void *addr, size_t len);
+bool tcm_dtcm_present(void);
+bool tcm_itcm_present(void);
 
 #endif
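
The new presence predicates let code check for TCM banks before allocating from them. A minimal, hypothetical use:

    #include <asm/tcm.h>

    static void *example_get_fast_buffer(size_t len)
    {
            /* No data TCM banks on this CPU: let the caller fall back
             * to ordinary memory. */
            if (!tcm_dtcm_present())
                    return NULL;
            return tcm_alloc(len);
    }
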
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index d2005de383b8..8077145698ff 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -34,16 +34,12 @@
 #define TLB_V6_D_ASID	(1 << 17)
 #define TLB_V6_I_ASID	(1 << 18)
 
-#define TLB_BTB		(1 << 28)
-
 /* Unified Inner Shareable TLB operations (ARMv7 MP extensions) */
 #define TLB_V7_UIS_PAGE	(1 << 19)
 #define TLB_V7_UIS_FULL (1 << 20)
 #define TLB_V7_UIS_ASID (1 << 21)
 
-/* Inner Shareable BTB operation (ARMv7 MP extensions) */
-#define TLB_V7_IS_BTB	(1 << 22)
-
+#define TLB_BARRIER	(1 << 28)
 #define TLB_L2CLEAN_FR	(1 << 29)		/* Feroceon */
 #define TLB_DCLEAN	(1 << 30)
 #define TLB_WB		(1 << 31)
@@ -58,7 +54,7 @@
  *	  v4wb  - ARMv4 with write buffer without I TLB flush entry instruction
  *	  v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
  *	  fr    - Feroceon (v4wbi with non-outer-cacheable page table walks)
- *	  fa    - Faraday (v4 with write buffer with UTLB and branch target buffer (BTB))
+ *	  fa    - Faraday (v4 with write buffer with UTLB)
  *	  v6wbi - ARMv6 with write buffer with I TLB flush entry instruction
  *	  v7wbi - identical to v6wbi
  */
@@ -99,7 +95,7 @@
 # define v4_always_flags	(-1UL)
 #endif
 
-#define fa_tlb_flags	(TLB_WB | TLB_BTB | TLB_DCLEAN | \
+#define fa_tlb_flags	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V4_U_FULL | TLB_V4_U_PAGE)
 
 #ifdef CONFIG_CPU_TLB_FA
@@ -166,7 +162,7 @@
 # define v4wb_always_flags	(-1UL)
 #endif
 
-#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v6wbi_tlb_flags (TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V6_I_FULL | TLB_V6_D_FULL | \
 			 TLB_V6_I_PAGE | TLB_V6_D_PAGE | \
 			 TLB_V6_I_ASID | TLB_V6_D_ASID)
@@ -184,9 +180,9 @@
 # define v6wbi_always_flags	(-1UL)
 #endif
 
-#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_V7_IS_BTB | \
+#define v7wbi_tlb_flags_smp	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V7_UIS_FULL | TLB_V7_UIS_PAGE | TLB_V7_UIS_ASID)
-#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BTB | \
+#define v7wbi_tlb_flags_up	(TLB_WB | TLB_DCLEAN | TLB_BARRIER | \
 			 TLB_V6_U_FULL | TLB_V6_U_PAGE | TLB_V6_U_ASID)
 
 #ifdef CONFIG_CPU_TLB_V7
@@ -341,15 +337,7 @@ static inline void local_flush_tlb_all(void)
 	if (tlb_flag(TLB_V7_UIS_FULL))
 		asm("mcr p15, 0, %0, c8, c3, 0" : : "r" (zero) : "cc");
 
-	if (tlb_flag(TLB_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-		dsb();
-		isb();
-	}
-	if (tlb_flag(TLB_V7_IS_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
 		isb();
 	}
@@ -389,17 +377,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
 		asm("mcr p15, 0, %0, c8, c3, 2" : : "r" (asid) : "cc");
 #endif
 
-	if (tlb_flag(TLB_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-		dsb();
-	}
-	if (tlb_flag(TLB_V7_IS_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_BARRIER))
 		dsb();
-		isb();
-	}
 }
 
 static inline void
@@ -439,17 +418,8 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (uaddr) : "cc");
 #endif
 
-	if (tlb_flag(TLB_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-		dsb();
-	}
-	if (tlb_flag(TLB_V7_IS_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_BARRIER))
 		dsb();
-		isb();
-	}
 }
 
 static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
@@ -482,15 +452,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
 	if (tlb_flag(TLB_V7_UIS_PAGE))
 		asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (kaddr) : "cc");
 
-	if (tlb_flag(TLB_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c5, 6" : : "r" (zero) : "cc");
-		dsb();
-		isb();
-	}
-	if (tlb_flag(TLB_V7_IS_BTB)) {
-		/* flush the branch target cache */
-		asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero) : "cc");
+	if (tlb_flag(TLB_BARRIER)) {
 		dsb();
 		isb();
 	}
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f90756dc16dc..5b29a6673625 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -3,6 +3,9 @@
 
 #include <linux/list.h>
 
+struct pt_regs;
+struct task_struct;
+
 struct undef_hook {
 	struct list_head node;
 	u32 instr_mask;
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 90c62cd51ca9..fa02a22a4c4b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -29,21 +29,53 @@
 #include <asm/entry-macro-multi.S>
 
 /*
- * Interrupt handling.  Preserves r7, r8, r9
+ * Interrupt handling.
  */
 	.macro	irq_handler
 #ifdef CONFIG_MULTI_IRQ_HANDLER
-	ldr	r5, =handle_arch_irq
+	ldr	r1, =handle_arch_irq
 	mov	r0, sp
-	ldr	r5, [r5]
+	ldr	r1, [r1]
 	adr	lr, BSYM(9997f)
-	teq	r5, #0
-	movne	pc, r5
+	teq	r1, #0
+	movne	pc, r1
 #endif
 	arch_irq_handler_default
 9997:
 	.endm
 
+	.macro	pabt_helper
+	@ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
+#ifdef MULTI_PABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_PABT_FUNC]
+#else
+	bl	CPU_PABORT_HANDLER
+#endif
+	.endm
+
+	.macro	dabt_helper
+
+	@
+	@ Call the processor-specific abort handler:
+	@
+	@  r2 - pt_regs
+	@  r4 - aborted context pc
+	@  r5 - aborted context psr
+	@
+	@ The abort handler must return the aborted address in r0, and
+	@ the fault status register in r1.  r9 must be preserved.
+	@
+#ifdef MULTI_DABORT
+	ldr	ip, .LCprocfns
+	mov	lr, pc
+	ldr	pc, [ip, #PROCESSOR_DABT_FUNC]
+#else
+	bl	CPU_DABORT_HANDLER
+#endif
+	.endm
+
 #ifdef CONFIG_KPROBES
 	.section	.kprobes.text,"ax",%progbits
 #else
@@ -126,106 +158,74 @@ ENDPROC(__und_invalid)
 SPFIX(	subeq	sp, sp, #4	)
 	stmia	sp, {r1 - r12}
 
-	ldmia	r0, {r1 - r3}
-	add	r5, sp, #S_SP - 4	@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""      ""       ""
-	add	r0, sp, #(S_FRAME_SIZE + \stack_hole - 4)
-SPFIX(	addeq	r0, r0, #4	)
-	str	r1, [sp, #-4]!		@ save the "real" r0 copied
+	ldmia	r0, {r3 - r5}
+	add	r7, sp, #S_SP - 4	@ here for interlock avoidance
+	mov	r6, #-1			@  ""  ""      ""       ""
+	add	r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
+SPFIX(	addeq	r2, r2, #4	)
+	str	r3, [sp, #-4]!		@ save the "real" r0 copied
 					@ from the exception stack
 
-	mov	r1, lr
+	mov	r3, lr
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r0 - sp_svc
-	@  r1 - lr_svc
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r2 - sp_svc
+	@  r3 - lr_svc
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
-	stmia	r5, {r0 - r4}
+	stmia	r7, {r2 - r6}
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.align	5
 __dabt_svc:
 	svc_entry
-
-	@
-	@ get ready to re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.  r9 must be preserved.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ set desired IRQ state, then call main handler
-	@
-	debug_entry r1
-	msr	cpsr_c, r9
 	mov	r2, sp
-	bl	do_DataAbort
+	dabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__dabt_svc)
 
 	.align	5
 __irq_svc:
 	svc_entry
+	irq_handler
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
-#endif
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
-	irq_handler
-#ifdef CONFIG_PREEMPT
-	str	r8, [tsk, #TI_PREEMPT]		@ restore preempt count
 	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
 	movne	r0, #0				@ force flags to 0
 	tst	r0, #_TIF_NEED_RESCHED
 	blne	svc_preempt
 #endif
-	ldr	r4, [sp, #S_PSR]		@ irqs are already disabled
+
 #ifdef CONFIG_TRACE_IRQFLAGS
-	tst	r4, #PSR_I_BIT
-	bleq	trace_hardirqs_on
+	@ The parent context IRQs must have been enabled to get here in
+	@ the first place, so there's no point checking the PSR I bit.
+	bl	trace_hardirqs_on
 #endif
-	svc_exit r4				@ return from exception
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__irq_svc)
 
@@ -251,7 +251,6 @@ __und_svc:
 #else
 	svc_entry
 #endif
-
 	@
 	@ call emulation code, which returns using r9 if it has emulated
 	@ the instruction, or the more conventional lr if we are to treat
@@ -260,15 +259,16 @@ __und_svc:
 	@  r0 - instruction
 	@
 #ifndef CONFIG_THUMB2_KERNEL
-	ldr	r0, [r2, #-4]
+	ldr	r0, [r4, #-4]
 #else
-	ldrh	r0, [r2, #-2]			@ Thumb instruction at LR - 2
+	ldrh	r0, [r4, #-2]			@ Thumb instruction at LR - 2
 	and	r9, r0, #0xf800
 	cmp	r9, #0xe800			@ 32-bit instruction if xx >= 0
-	ldrhhs	r9, [r2]			@ bottom 16 bits
+	ldrhhs	r9, [r4]			@ bottom 16 bits
 	orrhs	r0, r9, r0, lsl #16
 #endif
 	adr	r9, BSYM(1f)
+	mov	r2, r4
 	bl	call_fpe
 
 	mov	r0, sp				@ struct pt_regs *regs
@@ -282,45 +282,35 @@ __und_svc:
 	@
 	@ restore SPSR and restart the instruction
 	@
-	ldr	r2, [sp, #S_PSR]		@ Get SVC cpsr
-	svc_exit r2				@ return from exception
+	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__und_svc)
 
 	.align	5
 __pabt_svc:
 	svc_entry
-
-	@
-	@ re-enable interrupts if appropriate
-	@
-	mrs	r9, cpsr
-	tst	r3, #PSR_I_BIT
-	biceq	r9, r9, #PSR_I_BIT
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	msr	cpsr_c, r9			@ Maybe enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 
 	@
 	@ IRQs off again before pulling preserved data off the stack
 	@
 	disable_irq_notrace
 
-	@
-	@ restore SPSR and restart the instruction
-	@
-	ldr	r2, [sp, #S_PSR]
-	svc_exit r2				@ return from exception
+#ifdef CONFIG_TRACE_IRQFLAGS
+	tst	r5, #PSR_I_BIT
+	bleq	trace_hardirqs_on
+	tst	r5, #PSR_I_BIT
+	blne	trace_hardirqs_off
+#endif
+	svc_exit r5				@ return from exception
 UNWIND(.fnend		)
 ENDPROC(__pabt_svc)
 
@@ -351,23 +341,23 @@ ENDPROC(__pabt_svc)
 ARM(	stmib	sp, {r1 - r12}	)
 THUMB(	stmia	sp, {r0 - r12}	)
 
-	ldmia	r0, {r1 - r3}
+	ldmia	r0, {r3 - r5}
 	add	r0, sp, #S_PC		@ here for interlock avoidance
-	mov	r4, #-1			@  ""  ""     ""        ""
+	mov	r6, #-1			@  ""  ""     ""        ""
 
-	str	r1, [sp]		@ save the "real" r0 copied
+	str	r3, [sp]		@ save the "real" r0 copied
 					@ from the exception stack
 
 	@
 	@ We are now ready to fill in the remaining blanks on the stack:
 	@
-	@  r2 - lr_<exception>, already fixed up for correct return/restart
-	@  r3 - spsr_<exception>
-	@  r4 - orig_r0 (see pt_regs definition in ptrace.h)
+	@  r4 - lr_<exception>, already fixed up for correct return/restart
+	@  r5 - spsr_<exception>
+	@  r6 - orig_r0 (see pt_regs definition in ptrace.h)
 	@
 	@ Also, separately save sp_usr and lr_usr
 	@
-	stmia	r0, {r2 - r4}
+	stmia	r0, {r4 - r6}
 ARM(	stmdb	r0, {sp, lr}^			)
 THUMB(	store_user_sp_lr r0, r1, S_SP - S_PC	)
 
@@ -380,6 +370,10 @@ ENDPROC(__pabt_svc)
 	@ Clear FP to mark the first stack frame
 	@
 	zero_fp
+
+#ifdef CONFIG_IRQSOFF_TRACER
+	bl	trace_hardirqs_off
+#endif
 	.endm
 
 	.macro	kuser_cmpxchg_check
@@ -391,7 +385,7 @@ ENDPROC(__pabt_svc)
 	@ if it was interrupted in a critical region.  Here we
 	@ perform a quick test inline since it should be false
 	@ 99.9999% of the time.  The rest is done out of line.
-	cmp	r2, #TASK_SIZE
+	cmp	r4, #TASK_SIZE
 	blhs	kuser_cmpxchg_fixup
 #endif
 #endif
@@ -401,32 +395,9 @@ ENDPROC(__pabt_svc)
 __dabt_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-	@
-	@ Call the processor-specific abort handler:
-	@
-	@  r2 - aborted context pc
-	@  r3 - aborted context cpsr
-	@
-	@ The abort handler must return the aborted address in r0, and
-	@ the fault status register in r1.
-	@
-#ifdef MULTI_DABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_DABT_FUNC]
-#else
-	bl	CPU_DABORT_HANDLER
-#endif
-
-	@
-	@ IRQs on, then call the main handler
-	@
-	debug_entry r1
-	enable_irq
 	mov	r2, sp
-	adr	lr, BSYM(ret_from_exception)
-	b	do_DataAbort
+	dabt_helper
+	b	ret_from_exception
 UNWIND(.fnend		)
 ENDPROC(__dabt_usr)
 
@@ -434,28 +405,8 @@ ENDPROC(__dabt_usr)
 __irq_usr:
 	usr_entry
 	kuser_cmpxchg_check
-
-#ifdef CONFIG_IRQSOFF_TRACER
-	bl	trace_hardirqs_off
-#endif
-
-	get_thread_info tsk
-#ifdef CONFIG_PREEMPT
-	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	add	r7, r8, #1			@ increment it
-	str	r7, [tsk, #TI_PREEMPT]
-#endif
-
 	irq_handler
-#ifdef CONFIG_PREEMPT
-	ldr	r0, [tsk, #TI_PREEMPT]
-	str	r8, [tsk, #TI_PREEMPT]
-	teq	r0, r7
-ARM(	strne	r0, [r0, -r0]	)
-THUMB(	movne	r0, #0		)
-THUMB(	strne	r0, [r0]	)
-#endif
-
+	get_thread_info tsk
 	mov	why, #0
 	b	ret_to_user_from_irq
 UNWIND(.fnend		)
@@ -467,6 +418,9 @@ ENDPROC(__irq_usr)
 __und_usr:
 	usr_entry
 
+	mov	r2, r4
+	mov	r3, r5
+
 	@
 	@ fall through to the emulation code, which returns using r9 if
 	@ it has emulated the instruction, or the more conventional lr
@@ -682,19 +636,8 @@ ENDPROC(__und_usr_unknown)
 	.align	5
 __pabt_usr:
 	usr_entry
-
-	mov	r0, r2			@ pass address of aborted instruction.
-#ifdef MULTI_PABORT
-	ldr	r4, .LCprocfns
-	mov	lr, pc
-	ldr	pc, [r4, #PROCESSOR_PABT_FUNC]
-#else
-	bl	CPU_PABORT_HANDLER
-#endif
-	debug_entry r1
-	enable_irq			@ Enable interrupts
 	mov	r2, sp				@ regs
-	bl	do_PrefetchAbort		@ call abort handler
+	pabt_helper
 UNWIND(.fnend		)
 	/* fall through */
 /*
@@ -927,13 +870,13 @@ __kuser_cmpxchg: @ 0xffff0fc0
 	.text
 kuser_cmpxchg_fixup:
 	@ Called from kuser_cmpxchg_check macro.
-	@ r2 = address of interrupted insn (must be preserved).
+	@ r4 = address of interrupted insn (must be preserved).
 	@ sp = saved regs. r7 and r8 are clobbered.
 	@ 1b = first critical insn, 2b = last critical insn.
-	@ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
+	@ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
 	mov	r7, #0xffff0fff
 	sub	r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
-	subs	r8, r2, r7
+	subs	r8, r4, r7
 	rsbcss	r8, r8, #(2b - 1b)
 	strcs	r7, [sp, #S_PC]
 	mov	pc, lr
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 051166c2a932..4d6ad8348e89 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -165,25 +165,6 @@
 	.endm
 #endif	/* !CONFIG_THUMB2_KERNEL */
 
-	@
-	@ Debug exceptions are taken as prefetch or data aborts.
-	@ We must disable preemption during the handler so that
-	@ we can access the debug registers safely.
-	@
-	.macro	debug_entry, fsr
-#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
-	ldr	r4, =0x40f		@ mask out fsr.fs
-	and	r5, r4, \fsr
-	cmp	r5, #2			@ debug exception
-	bne	1f
-	get_thread_info r10
-	ldr	r6, [r10, #TI_PREEMPT]	@ get preempt count
-	add	r11, r6, #1		@ increment it
-	str	r11, [r10, #TI_PREEMPT]
-1:
-#endif
-	.endm
-
 /*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 6b1e0ad9ec3b..d46f25968bec 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -32,8 +32,16 @@
  *  numbers for r1.
  *
  */
+	.arm
+
 	__HEAD
 ENTRY(stext)
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 #ifndef CONFIG_CPU_CP15
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 278c1b0ebb2e..742b6108a001 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -71,8 +71,16 @@
  * crap here - that's what the boot loader (or in extreme, well justified
  * circumstances, zImage) is for.
  */
+	.arm
+
 	__HEAD
 ENTRY(stext)
+
+ THUMB(	adr	r9, BSYM(1f)	)	@ Kernel is always entered in ARM.
+ THUMB(	bx	r9		)	@ If this is a Thumb-2 kernel,
+ THUMB(	.thumb			)	@ switch to Thumb now.
+ THUMB(1:			)
+
 	setmode	PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9 @ ensure svc mode
 						@ and irqs disabled
 	mrc	p15, 0, r9, c0, c0		@ get processor id
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 87acc25d7a3e..a927ca1f5566 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -796,7 +796,7 @@ unlock:
 
 /*
  * Called from either the Data Abort Handler [watchpoint] or the
- * Prefetch Abort Handler [breakpoint] with preemption disabled.
+ * Prefetch Abort Handler [breakpoint] with interrupts disabled.
  */
 static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 				 struct pt_regs *regs)
@@ -804,8 +804,10 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 	int ret = 0;
 	u32 dscr;
 
-	/* We must be called with preemption disabled. */
-	WARN_ON(preemptible());
+	preempt_disable();
+
+	if (interrupts_enabled(regs))
+		local_irq_enable();
 
 	/* We only handle watchpoints and hardware breakpoints. */
 	ARM_DBG_READ(c1, 0, dscr);
@@ -824,10 +826,6 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr,
 		ret = 1; /* Unhandled fault. */
 	}
 
-	/*
-	 * Re-enable preemption after it was disabled in the
-	 * low-level exception handling code.
-	 */
 	preempt_enable();
 
 	return ret;
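
The handler is now entered with IRQs disabled (the entry code no longer re-enables them), so it disables preemption itself and then restores the interrupted context's IRQ state. For reference, interrupts_enabled() tests the I bit in the saved CPSR; this is a sketch of the asm/ptrace.h helper, reproduced from memory rather than from this patch:

    /* true when the interrupted context was running with IRQs on */
    #define interrupts_enabled(regs) \
            (!((regs)->ARM_cpsr & PSR_I_BIT))
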
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad03fcc6..0f928a131af8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -131,54 +131,63 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
 {
-	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
 	bool ret = false;
 
-	if (cpu >= nr_cpu_ids) {
-		cpu = cpumask_any(cpu_online_mask);
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
 	}
 
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
-	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+	c = irq_data_get_irq_chip(d);
+	if (c->irq_set_affinity)
+		c->irq_set_affinity(d, affinity, true);
+	else
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 
 	return ret;
 }
 
 /*
- * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
  * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
  */
 void migrate_irqs(void)
 {
-	unsigned int i, cpu = smp_processor_id();
+	unsigned int i;
 	struct irq_desc *desc;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
-		struct irq_data *d = &desc->irq_data;
 		bool affinity_broken = false;
 
-		raw_spin_lock(&desc->lock);
-		do {
-			if (desc->action == NULL)
-				break;
-
-			if (d->node != cpu)
-				break;
-
-			affinity_broken = migrate_one_irq(d);
-		} while (0);
+		if (!desc)
+			continue;
+
+		raw_spin_lock(&desc->lock);
+		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
 
 		if (affinity_broken && printk_ratelimit())
-			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+				smp_processor_id());
 	}
 
 	local_irq_restore(flags);
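
For context, migrate_irqs() runs on the dying CPU from the ARM hotplug path. A simplified, hedged sketch of the call-site shape only - the real __cpu_disable() also flushes caches and TLBs and tears down the per-cpu timer:

    int __cpu_disable(void)
    {
            unsigned int cpu = smp_processor_id();

            /* Stop new IRQs being targeted at this CPU, then move the rest
             * away; IRQs with no other permitted CPU are forced onto any
             * online one by migrate_one_irq() above. */
            set_cpu_online(cpu, false);
            migrate_irqs();

            /* ... cache/TLB flush and local timer teardown elided ... */
            return 0;
    }
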
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index d53c0abc4dd3..8d8507858e5c 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -435,7 +435,7 @@ armpmu_reserve_hardware(void)
 			if (irq >= 0)
 				free_irq(irq, NULL);
 		}
-		release_pmu(pmu_device);
+		release_pmu(ARM_PMU_DEVICE_CPU);
 		pmu_device = NULL;
 	}
 
@@ -454,7 +454,7 @@ armpmu_release_hardware(void)
 	}
 	armpmu->stop();
 
-	release_pmu(pmu_device);
+	release_pmu(ARM_PMU_DEVICE_CPU);
 	pmu_device = NULL;
 }
 
@@ -583,7 +583,7 @@ static int armpmu_event_init(struct perf_event *event)
 static void armpmu_enable(struct pmu *pmu)
 {
 	/* Enable all of the perf events on hardware. */
-	int idx;
+	int idx, enabled = 0;
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
 	if (!armpmu)
@@ -596,9 +596,11 @@ static void armpmu_enable(struct pmu *pmu)
 			continue;
 
 		armpmu->enable(&event->hw, idx);
+		enabled = 1;
 	}
 
-	armpmu->start();
+	if (enabled)
+		armpmu->start();
 }
 
 static void armpmu_disable(struct pmu *pmu)
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index 2c79eec19262..2b70709376c3 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -17,6 +17,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 
 #include <asm/pmu.h>
@@ -25,36 +26,88 @@ static volatile long pmu_lock;
 
 static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
 
-static int __devinit pmu_device_probe(struct platform_device *pdev)
+static int __devinit pmu_register(struct platform_device *pdev,
+					enum arm_pmu_type type)
 {
-
-	if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
 		pr_warning("received registration request for unknown "
-				"device %d\n", pdev->id);
+				"device %d\n", type);
 		return -EINVAL;
 	}
 
-	if (pmu_devices[pdev->id])
-		pr_warning("registering new PMU device type %d overwrites "
-				"previous registration!\n", pdev->id);
-	else
-		pr_info("registered new PMU device of type %d\n",
-				pdev->id);
+	if (pmu_devices[type]) {
+		pr_warning("rejecting duplicate registration of PMU device "
+			"type %d.", type);
+		return -ENOSPC;
+	}
 
-	pmu_devices[pdev->id] = pdev;
+	pr_info("registered new PMU device of type %d\n", type);
+	pmu_devices[type] = pdev;
 	return 0;
 }
 
-static struct platform_driver pmu_driver = {
+#define OF_MATCH_PMU(_name, _type) {	\
+	.compatible = _name,		\
+	.data = (void *)_type,		\
+}
+
+#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
+
+static struct of_device_id armpmu_of_device_ids[] = {
+	OF_MATCH_CPU("arm,cortex-a9-pmu"),
+	OF_MATCH_CPU("arm,cortex-a8-pmu"),
+	OF_MATCH_CPU("arm,arm1136-pmu"),
+	OF_MATCH_CPU("arm,arm1176-pmu"),
+	{},
+};
+
+#define PLAT_MATCH_PMU(_name, _type) {	\
+	.name		= _name,	\
+	.driver_data	= _type,	\
+}
+
+#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	PLAT_MATCH_CPU("arm-pmu"),
+	{},
+};
+
+enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
+{
+	const struct of_device_id	*of_id;
+	const struct platform_device_id *pdev_id;
+
+	/* provided by of_device_id table */
+	if (pdev->dev.of_node) {
+		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
+		BUG_ON(!of_id);
+		return (enum arm_pmu_type)of_id->data;
+	}
+
+	/* Provided by platform_device_id table */
+	pdev_id = platform_get_device_id(pdev);
+	BUG_ON(!pdev_id);
+	return pdev_id->driver_data;
+}
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	return pmu_register(pdev, armpmu_device_type(pdev));
+}
+
+static struct platform_driver armpmu_driver = {
 	.driver		= {
 		.name	= "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
 	},
-	.probe		= pmu_device_probe,
+	.probe		= armpmu_device_probe,
+	.id_table	= armpmu_plat_device_ids,
 };
 
 static int __init register_pmu_driver(void)
 {
-	return platform_driver_register(&pmu_driver);
+	return platform_driver_register(&armpmu_driver);
 }
 device_initcall(register_pmu_driver);
 
@@ -77,11 +130,11 @@ reserve_pmu(enum arm_pmu_type device)
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
 int
-release_pmu(struct platform_device *pdev)
+release_pmu(enum arm_pmu_type device)
 {
-	if (WARN_ON(pdev != pmu_devices[pdev->id]))
+	if (WARN_ON(!pmu_devices[device]))
 		return -EINVAL;
-	clear_bit_unlock(pdev->id, &pmu_lock);
+	clear_bit_unlock(device, &pmu_lock);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(release_pmu);
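
With the id tables above, the PMU type now comes from the match entry rather than from pdev->id. A hypothetical board-file registration that the new "arm-pmu" platform_device_id table would match - the IRQ number is illustrative:

    #include <linux/platform_device.h>

    static struct resource example_pmu_resource = {
            .start  = 40,                   /* hypothetical PMU IRQ number */
            .end    = 40,
            .flags  = IORESOURCE_IRQ,
    };

    static struct platform_device example_pmu_device = {
            .name           = "arm-pmu",    /* matched by armpmu_plat_device_ids */
            .id             = -1,           /* type comes from the id table now */
            .resource       = &example_pmu_resource,
            .num_resources  = 1,
    };

    static int __init example_board_init(void)
    {
            return platform_device_register(&example_pmu_device);
    }
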
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ed11fb08b05a..9c3278f37796 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -73,6 +73,7 @@ __setup("fpe=", fpe_setup);
 #endif
 
 extern void paging_init(struct machine_desc *desc);
+extern void sanity_check_meminfo(void);
 extern void reboot_setup(char *str);
 
 unsigned int processor_id;
@@ -342,54 +343,6 @@ static void __init feat_v6_fixup(void)
 	elf_hwcap &= ~HWCAP_TLS;
 }
 
-static void __init setup_processor(void)
-{
-	struct proc_info_list *list;
-
-	/*
-	 * locate processor in the list of supported processor
-	 * types.  The linker builds this table for us from the
-	 * entries in arch/arm/mm/proc-*.S
-	 */
-	list = lookup_processor_type(read_cpuid_id());
-	if (!list) {
-		printk("CPU configuration botched (ID %08x), unable "
-		       "to continue.\n", read_cpuid_id());
-		while (1);
-	}
-
-	cpu_name = list->cpu_name;
-
-#ifdef MULTI_CPU
-	processor = *list->proc;
-#endif
-#ifdef MULTI_TLB
-	cpu_tlb = *list->tlb;
-#endif
-#ifdef MULTI_USER
-	cpu_user = *list->user;
-#endif
-#ifdef MULTI_CACHE
-	cpu_cache = *list->cache;
-#endif
-
-	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
-	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
-	       proc_arch[cpu_architecture()], cr_alignment);
-
-	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
-	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
-	elf_hwcap = list->elf_hwcap;
-#ifndef CONFIG_ARM_THUMB
-	elf_hwcap &= ~HWCAP_THUMB;
-#endif
-
-	feat_v6_fixup();
-
-	cacheid_init();
-	cpu_proc_init();
-}
-
 /*
  * cpu_init - initialise one CPU.
  *
@@ -405,6 +358,8 @@ void cpu_init(void)
 		BUG();
 	}
 
+	cpu_proc_init();
+
 	/*
 	 * Define the placement constraint for the inline asm directive below.
 	 * In Thumb-2, msr with an immediate value is not allowed.
@@ -441,6 +396,54 @@ void cpu_init(void)
 	    : "r14");
 }
 
+static void __init setup_processor(void)
+{
+	struct proc_info_list *list;
+
+	/*
+	 * locate processor in the list of supported processor
+	 * types.  The linker builds this table for us from the
+	 * entries in arch/arm/mm/proc-*.S
+	 */
+	list = lookup_processor_type(read_cpuid_id());
+	if (!list) {
+		printk("CPU configuration botched (ID %08x), unable "
+		       "to continue.\n", read_cpuid_id());
+		while (1);
+	}
+
+	cpu_name = list->cpu_name;
+
+#ifdef MULTI_CPU
+	processor = *list->proc;
+#endif
+#ifdef MULTI_TLB
+	cpu_tlb = *list->tlb;
+#endif
+#ifdef MULTI_USER
+	cpu_user = *list->user;
+#endif
+#ifdef MULTI_CACHE
+	cpu_cache = *list->cache;
+#endif
+
+	printk("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
+	       cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
+	       proc_arch[cpu_architecture()], cr_alignment);
+
+	sprintf(init_utsname()->machine, "%s%c", list->arch_name, ENDIANNESS);
+	sprintf(elf_platform, "%s%c", list->elf_name, ENDIANNESS);
+	elf_hwcap = list->elf_hwcap;
+#ifndef CONFIG_ARM_THUMB
+	elf_hwcap &= ~HWCAP_THUMB;
+#endif
+
+	feat_v6_fixup();
+
+	cacheid_init();
+	cpu_init();
+}
+
 void __init dump_machine_table(void)
 {
 	struct machine_desc *p;
@@ -900,6 +903,7 @@ void __init setup_arch(char **cmdline_p)
 
 	parse_early_param();
 
+	sanity_check_meminfo();
 	arm_memblock_init(&meminfo, mdesc);
 
 	paging_init(mdesc);
@@ -913,7 +917,6 @@ void __init setup_arch(char **cmdline_p)
 #endif
 	reserve_crashkernel();
 
-	cpu_init();
 	tcm_init();
 
 #ifdef CONFIG_MULTI_IRQ_HANDLER
diff --git a/arch/arm/kernel/sleep.S b/arch/arm/kernel/sleep.S
index 6398ead9d1c0..dc902f2c6845 100644
--- a/arch/arm/kernel/sleep.S
+++ b/arch/arm/kernel/sleep.S
@@ -10,64 +10,61 @@
 /*
  * Save CPU state for a suspend
  *  r1 = v:p offset
- *  r3 = virtual return function
- * Note: sp is decremented to allocate space for CPU state on stack
- * r0-r3,r9,r10,lr corrupted
+ *  r2 = suspend function arg0
+ *  r3 = suspend function
  */
-ENTRY(cpu_suspend)
-	mov	r9, lr
+ENTRY(__cpu_suspend)
+	stmfd	sp!, {r4 - r11, lr}
 #ifdef MULTI_CPU
 	ldr	r10, =processor
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
+	ldr	r5, [r10, #CPU_SLEEP_SIZE] @ size of CPU sleep state
 	ldr	ip, [r10, #CPU_DO_RESUME] @ virtual resume function
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
+#else
+	ldr	r5, =cpu_suspend_size
+	ldr	ip, =cpu_do_resume
+#endif
+	mov	r6, sp			@ current virtual SP
+	sub	sp, sp, r5		@ allocate CPU state on stack
+	mov	r0, sp			@ save pointer to CPU save block
 	add	ip, ip, r1		@ convert resume fn to phys
-	stmfd	sp!, {r1, r2, r3, ip}	@ save v:p, virt SP, retfn, phys resume fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r1, r6, ip}	@ save v:p, virt SP, phys resume fn
+	ldr	r5, =sleep_save_sp
+	add	r6, sp, r1		@ convert SP to phys
+	stmfd	sp!, {r2, r3}		@ save suspend func arg and pointer
 #ifdef CONFIG_SMP
 	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
 	ALT_UP(mov lr, #0)
 	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
+	str	r6, [r5, lr, lsl #2]	@ save phys SP
 #else
-	str	r2, [r3]		@ save phys SP
+	str	r6, [r5]		@ save phys SP
 #endif
+#ifdef MULTI_CPU
 	mov	lr, pc
 	ldr	pc, [r10, #CPU_DO_SUSPEND] @ save CPU state
 #else
-	mov	r2, sp			@ current virtual SP
-	ldr	r0, =cpu_suspend_size
-	sub	sp, sp, r0		@ allocate CPU state on stack
-	mov	r0, sp			@ save pointer
-	stmfd	sp!, {r1, r2, r3}	@ save v:p, virt SP, return fn
-	ldr	r3, =sleep_save_sp
-	add	r2, sp, r1		@ convert SP to phys
-#ifdef CONFIG_SMP
-	ALT_SMP(mrc p15, 0, lr, c0, c0, 5)
-	ALT_UP(mov lr, #0)
-	and	lr, lr, #15
-	str	r2, [r3, lr, lsl #2]	@ save phys SP
-#else
-	str	r2, [r3]		@ save phys SP
-#endif
 	bl	cpu_do_suspend
 #endif
 
 	@ flush data cache
 #ifdef MULTI_CACHE
 	ldr	r10, =cpu_cache
-	mov	lr, r9
+	mov	lr, pc
 	ldr	pc, [r10, #CACHE_FLUSH_KERN_ALL]
 #else
-	mov	lr, r9
-	b	__cpuc_flush_kern_all
+	bl	__cpuc_flush_kern_all
 #endif
-ENDPROC(cpu_suspend)
+	adr	lr, BSYM(cpu_suspend_abort)
+	ldmfd	sp!, {r0, pc}		@ call suspend fn
+ENDPROC(__cpu_suspend)
 	.ltorg
 
+cpu_suspend_abort:
+	ldmia	sp!, {r1 - r3}		@ pop v:p, virt SP, phys resume fn
+	mov	sp, r2
+	ldmfd	sp!, {r4 - r11, pc}
+ENDPROC(cpu_suspend_abort)
+
 /*
  * r0 = control register value
  * r1 = v:p offset (preserved by cpu_do_resume)
@@ -97,7 +94,9 @@ ENDPROC(cpu_resume_turn_mmu_on)
 cpu_resume_after_mmu:
 	str	r5, [r2, r4, lsl #2]	@ restore old mapping
 	mcr	p15, 0, r0, c1, c0, 0	@ turn on D-cache
-	mov	pc, lr
+	bl	cpu_init		@ restore the und/abt/irq banked regs
+	mov	r0, #0			@ return zero on success
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_resume_after_mmu)
 
 /*
@@ -120,20 +119,11 @@ ENTRY(cpu_resume)
 	ldr	r0, sleep_save_sp	@ stack phys addr
 #endif
 	setmode	PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1  @ set SVC, irqs off
-#ifdef MULTI_CPU
-	@ load v:p, stack, return fn, resume fn
-  ARM(	ldmia	r0!, {r1, sp, lr, pc}	)
-THUMB(	ldmia	r0!, {r1, r2, r3, r4}	)
+	@ load v:p, stack, resume fn
+  ARM(	ldmia	r0!, {r1, sp, pc}	)
+THUMB(	ldmia	r0!, {r1, r2, r3}	)
 THUMB(	mov	sp, r2			)
-THUMB(	mov	lr, r3			)
-THUMB(	bx	r4			)
-#else
-	@ load v:p, stack, return fn
-  ARM(	ldmia	r0!, {r1, sp, lr}	)
-THUMB(	ldmia	r0!, {r1, r2, lr}	)
-THUMB(	mov	sp, r2			)
-	b	cpu_do_resume
-#endif
+THUMB(	bx	r3			)
 ENDPROC(cpu_resume)
 
 sleep_save_sp:
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index e7f92a4321f3..167e3cbe1f2f 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -365,8 +365,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	 */
 	if (max_cpus > ncores)
 		max_cpus = ncores;
-
-	if (max_cpus > 1) {
+	if (ncores > 1 && max_cpus) {
 		/*
 		 * Enable the local timer or broadcast device for the
 		 * boot CPU, but only if we have more than one CPU.
@@ -374,6 +373,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		percpu_timer_setup();
 
 		/*
+		 * Initialise the present map, which describes the set of CPUs
+		 * actually populated at the present time. A platform should
+		 * re-initialize the map in platform_smp_prepare_cpus() if
+		 * present != possible (e.g. physical hotplug).
+		 */
+		init_cpu_present(&cpu_possible_map);
+
+		/*
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
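
A hypothetical platform override of this new default, shown only to illustrate the present-vs-possible distinction the comment above describes; set_cpu_present() and cpu_none_mask are the generic cpumask APIs:

    #include <linux/cpumask.h>

    /* A platform with physical hotplug would narrow the present map to
     * the CPUs actually populated at boot. */
    void __init platform_smp_prepare_cpus(unsigned int max_cpus)
    {
            unsigned int i;

            init_cpu_present(cpu_none_mask);
            for (i = 0; i < max_cpus; i++)
                    set_cpu_present(i, true);
    }
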
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c
index a1e757c3439b..79ed5e7f204a 100644
--- a/arch/arm/kernel/smp_scu.c
+++ b/arch/arm/kernel/smp_scu.c
@@ -20,6 +20,7 @@
 #define SCU_INVALIDATE		0x0c
 #define SCU_FPGA_REVISION	0x10
 
+#ifdef CONFIG_SMP
 /*
  * Get the number of CPU cores from the SCU configuration
  */
@@ -50,6 +51,7 @@ void __init scu_enable(void __iomem *scu_base)
 	 */
 	flush_cache_all();
 }
+#endif
 
 /*
  * Set the executing CPUs power mode as defined.  This will be in
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 60636f499cb3..2c277d40cee6 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -115,7 +115,7 @@ static void __cpuinit twd_calibrate_rate(void)
 		twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
 
 		printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
-			(twd_timer_rate / 1000000) % 100);
+			(twd_timer_rate / 10000) % 100);
 	}
 }
 
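
Worked example of the fix: with twd_timer_rate = 123456789 Hz the old expression reused the whole-MHz value for the fraction, since (rate / 1000000) % 100 == 123 % 100 == 23; dividing by 10000 first keeps the two digits after the decimal point:

    unsigned long rate = 123456789;

    /* old: (rate / 1000000) % 100 == 23  -> "123.23MHz." (wrong)   */
    /* new: (rate / 10000)   % 100 == 45  -> "123.45MHz." (correct) */
    printk("%lu.%02luMHz.\n", rate / 1000000, (rate / 10000) % 100);
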
diff --git a/arch/arm/kernel/tcm.c b/arch/arm/kernel/tcm.c
index f5cf660eefcc..30e302d33e0a 100644
--- a/arch/arm/kernel/tcm.c
+++ b/arch/arm/kernel/tcm.c
@@ -19,6 +19,8 @@
 #include "tcm.h"
 
 static struct gen_pool *tcm_pool;
+static bool dtcm_present;
+static bool itcm_present;
 
 /* TCM section definitions from the linker */
 extern char __itcm_start, __sitcm_text, __eitcm_text;
@@ -90,6 +92,18 @@ void tcm_free(void *addr, size_t len)
 }
 EXPORT_SYMBOL(tcm_free);
 
+bool tcm_dtcm_present(void)
+{
+	return dtcm_present;
+}
+EXPORT_SYMBOL(tcm_dtcm_present);
+
+bool tcm_itcm_present(void)
+{
+	return itcm_present;
+}
+EXPORT_SYMBOL(tcm_itcm_present);
+
 static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
 				  u32 *offset)
 {
@@ -134,6 +148,10 @@ static int __init setup_tcm_bank(u8 type, u8 bank, u8 banks,
 			(tcm_region & 1) ? "" : "not ");
 	}
 
+	/* Not much fun you can do with a size 0 bank */
+	if (tcm_size == 0)
+		return 0;
+
 	/* Force move the TCM bank to where we want it, enable */
 	tcm_region = *offset | (tcm_region & 0x00000ffeU) | 1;
 
@@ -165,12 +183,20 @@ void __init tcm_init(void)
 	u32 tcm_status = read_cpuid_tcmstatus();
 	u8 dtcm_banks = (tcm_status >> 16) & 0x03;
 	u8 itcm_banks = (tcm_status & 0x03);
+	size_t dtcm_code_sz = &__edtcm_data - &__sdtcm_data;
+	size_t itcm_code_sz = &__eitcm_text - &__sitcm_text;
 	char *start;
 	char *end;
 	char *ram;
 	int ret;
 	int i;
 
+	/* Values greater than 2 for D/ITCM banks are "reserved" */
+	if (dtcm_banks > 2)
+		dtcm_banks = 0;
+	if (itcm_banks > 2)
+		itcm_banks = 0;
+
 	/* Setup DTCM if present */
 	if (dtcm_banks > 0) {
 		for (i = 0; i < dtcm_banks; i++) {
@@ -178,6 +204,13 @@ void __init tcm_init(void)
 			if (ret)
 				return;
 		}
+		/* This means you compiled more code than fits into DTCM */
+		if (dtcm_code_sz > (dtcm_end - DTCM_OFFSET)) {
+			pr_info("CPU DTCM: %u bytes of code compiled to "
+				"DTCM but only %lu bytes of DTCM present\n",
+				dtcm_code_sz, (dtcm_end - DTCM_OFFSET));
+			goto no_dtcm;
+		}
 		dtcm_res.end = dtcm_end - 1;
 		request_resource(&iomem_resource, &dtcm_res);
 		dtcm_iomap[0].length = dtcm_end - DTCM_OFFSET;
@@ -186,12 +219,16 @@ void __init tcm_init(void)
 		start = &__sdtcm_data;
 		end   = &__edtcm_data;
 		ram   = &__dtcm_start;
-		/* This means you compiled more code than fits into DTCM */
-		BUG_ON((end - start) > (dtcm_end - DTCM_OFFSET));
-		memcpy(start, ram, (end-start));
-		pr_debug("CPU DTCM: copied data from %p - %p\n", start, end);
+		memcpy(start, ram, dtcm_code_sz);
+		pr_debug("CPU DTCM: copied data from %p - %p\n",
+			 start, end);
+		dtcm_present = true;
+	} else if (dtcm_code_sz) {
+		pr_info("CPU DTCM: %u bytes of code compiled to DTCM but no "
+			"DTCM banks present in CPU\n", dtcm_code_sz);
 	}
 
+no_dtcm:
 	/* Setup ITCM if present */
 	if (itcm_banks > 0) {
 		for (i = 0; i < itcm_banks; i++) {
@@ -199,6 +236,13 @@ void __init tcm_init(void)
 			if (ret)
 				return;
 		}
+		/* This means you compiled more code than fits into ITCM */
+		if (itcm_code_sz > (itcm_end - ITCM_OFFSET)) {
+			pr_info("CPU ITCM: %u bytes of code compiled to "
+				"ITCM but only %lu bytes of ITCM present\n",
+				itcm_code_sz, (itcm_end - ITCM_OFFSET));
+			return;
+		}
 		itcm_res.end = itcm_end - 1;
 		request_resource(&iomem_resource, &itcm_res);
 		itcm_iomap[0].length = itcm_end - ITCM_OFFSET;
@@ -207,10 +251,13 @@ void __init tcm_init(void)
207 start = &__sitcm_text; 251 start = &__sitcm_text;
208 end = &__eitcm_text; 252 end = &__eitcm_text;
209 ram = &__itcm_start; 253 ram = &__itcm_start;
210 /* This means you compiled more code than fits into ITCM */ 254 memcpy(start, ram, itcm_code_sz);
211 BUG_ON((end - start) > (itcm_end - ITCM_OFFSET)); 255 pr_debug("CPU ITCM: copied code from %p - %p\n",
212 memcpy(start, ram, (end-start)); 256 start, end);
213 pr_debug("CPU ITCM: copied code from %p - %p\n", start, end); 257 itcm_present = true;
258 } else if (itcm_code_sz) {
259 pr_info("CPU ITCM: %u bytes of code compiled to ITCM but no "
260 "ITCM banks present in CPU\n", itcm_code_sz);
214 } 261 }
215} 262}
216 263
@@ -221,7 +268,6 @@ void __init tcm_init(void)
221 */ 268 */
222static int __init setup_tcm_pool(void) 269static int __init setup_tcm_pool(void)
223{ 270{
224 u32 tcm_status = read_cpuid_tcmstatus();
225 u32 dtcm_pool_start = (u32) &__edtcm_data; 271 u32 dtcm_pool_start = (u32) &__edtcm_data;
226 u32 itcm_pool_start = (u32) &__eitcm_text; 272 u32 itcm_pool_start = (u32) &__eitcm_text;
227 int ret; 273 int ret;
@@ -236,7 +282,7 @@ static int __init setup_tcm_pool(void)
236 pr_debug("Setting up TCM memory pool\n"); 282 pr_debug("Setting up TCM memory pool\n");
237 283
238 /* Add the rest of DTCM to the TCM pool */ 284 /* Add the rest of DTCM to the TCM pool */
239 if (tcm_status & (0x03 << 16)) { 285 if (dtcm_present) {
240 if (dtcm_pool_start < dtcm_end) { 286 if (dtcm_pool_start < dtcm_end) {
241 ret = gen_pool_add(tcm_pool, dtcm_pool_start, 287 ret = gen_pool_add(tcm_pool, dtcm_pool_start,
242 dtcm_end - dtcm_pool_start, -1); 288 dtcm_end - dtcm_pool_start, -1);
@@ -253,7 +299,7 @@ static int __init setup_tcm_pool(void)
253 } 299 }
254 300
255 /* Add the rest of ITCM to the TCM pool */ 301 /* Add the rest of ITCM to the TCM pool */
256 if (tcm_status & 0x03) { 302 if (itcm_present) {
257 if (itcm_pool_start < itcm_end) { 303 if (itcm_pool_start < itcm_end) {
258 ret = gen_pool_add(tcm_pool, itcm_pool_start, 304 ret = gen_pool_add(tcm_pool, itcm_pool_start,
259 itcm_end - itcm_pool_start, -1); 305 itcm_end - itcm_pool_start, -1);
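For readers following the tcm.c changes above: the new dtcm_present/itcm_present flags let setup_tcm_pool() trust the outcome of tcm_init() instead of re-reading the TCM status register. A minimal standalone sketch of the guard-then-copy pattern, with the kernel symbols and pr_info() replaced by plain C stand-ins (sizes and names here are illustrative only):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static bool dtcm_present;	/* consumed later by the pool setup */

/* Guard first, copy second: mirrors the tcm_init() flow above. */
static bool setup_dtcm(size_t dtcm_code_sz, size_t bank_sz,
		       char *dst, const char *src)
{
	/* a zero-sized or too-small bank must not be used */
	if (bank_sz == 0 || dtcm_code_sz > bank_sz) {
		printf("DTCM: need %zu bytes, have %zu\n",
		       dtcm_code_sz, bank_sz);
		return false;	/* setup_tcm_pool() will then skip it */
	}
	memcpy(dst, src, dtcm_code_sz);	/* relocate the payload into TCM */
	return true;
}

int main(void)
{
	char tcm[32];
	char image[16] = "dtcm payload";

	dtcm_present = setup_dtcm(sizeof(image), sizeof(tcm), tcm, image);
	printf("dtcm_present = %d\n", dtcm_present);
	return 0;
}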
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index e5287f21badc..bf977f8514f6 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -38,57 +38,6 @@ jiffies = jiffies_64 + 4;
38 38
39SECTIONS 39SECTIONS
40{ 40{
41#ifdef CONFIG_XIP_KERNEL
42 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
43#else
44 . = PAGE_OFFSET + TEXT_OFFSET;
45#endif
46
47 .init : { /* Init code and data */
48 _stext = .;
49 _sinittext = .;
50 HEAD_TEXT
51 INIT_TEXT
52 ARM_EXIT_KEEP(EXIT_TEXT)
53 _einittext = .;
54 ARM_CPU_DISCARD(PROC_INFO)
55 __arch_info_begin = .;
56 *(.arch.info.init)
57 __arch_info_end = .;
58 __tagtable_begin = .;
59 *(.taglist.init)
60 __tagtable_end = .;
61#ifdef CONFIG_SMP_ON_UP
62 __smpalt_begin = .;
63 *(.alt.smp.init)
64 __smpalt_end = .;
65#endif
66
67 __pv_table_begin = .;
68 *(.pv_table)
69 __pv_table_end = .;
70
71 INIT_SETUP(16)
72
73 INIT_CALLS
74 CON_INITCALL
75 SECURITY_INITCALL
76 INIT_RAM_FS
77
78#ifndef CONFIG_XIP_KERNEL
79 __init_begin = _stext;
80 INIT_DATA
81 ARM_EXIT_KEEP(EXIT_DATA)
82#endif
83 }
84
85 PERCPU_SECTION(32)
86
87#ifndef CONFIG_XIP_KERNEL
88 . = ALIGN(PAGE_SIZE);
89 __init_end = .;
90#endif
91
92 /* 41 /*
93 * unwind exit sections must be discarded before the rest of the 42 * unwind exit sections must be discarded before the rest of the
94 * unwind sections get included. 43 * unwind sections get included.
@@ -106,10 +55,22 @@ SECTIONS
106 *(.fixup) 55 *(.fixup)
107 *(__ex_table) 56 *(__ex_table)
108#endif 57#endif
58#ifndef CONFIG_SMP_ON_UP
59 *(.alt.smp.init)
60#endif
109 } 61 }
110 62
63#ifdef CONFIG_XIP_KERNEL
64 . = XIP_VIRT_ADDR(CONFIG_XIP_PHYS_ADDR);
65#else
66 . = PAGE_OFFSET + TEXT_OFFSET;
67#endif
68 .head.text : {
69 _text = .;
70 HEAD_TEXT
71 }
111 .text : { /* Real text segment */ 72 .text : { /* Real text segment */
112 _text = .; /* Text and read-only data */ 73 _stext = .; /* Text and read-only data */
113 __exception_text_start = .; 74 __exception_text_start = .;
114 *(.exception.text) 75 *(.exception.text)
115 __exception_text_end = .; 76 __exception_text_end = .;
@@ -122,8 +83,6 @@ SECTIONS
122 *(.fixup) 83 *(.fixup)
123#endif 84#endif
124 *(.gnu.warning) 85 *(.gnu.warning)
125 *(.rodata)
126 *(.rodata.*)
127 *(.glue_7) 86 *(.glue_7)
128 *(.glue_7t) 87 *(.glue_7t)
129 . = ALIGN(4); 88 . = ALIGN(4);
@@ -152,10 +111,63 @@ SECTIONS
152 111
153 _etext = .; /* End of text and rodata section */ 112 _etext = .; /* End of text and rodata section */
154 113
114#ifndef CONFIG_XIP_KERNEL
115 . = ALIGN(PAGE_SIZE);
116 __init_begin = .;
117#endif
118
119 INIT_TEXT_SECTION(8)
120 .exit.text : {
121 ARM_EXIT_KEEP(EXIT_TEXT)
122 }
123 .init.proc.info : {
124 ARM_CPU_DISCARD(PROC_INFO)
125 }
126 .init.arch.info : {
127 __arch_info_begin = .;
128 *(.arch.info.init)
129 __arch_info_end = .;
130 }
131 .init.tagtable : {
132 __tagtable_begin = .;
133 *(.taglist.init)
134 __tagtable_end = .;
135 }
136#ifdef CONFIG_SMP_ON_UP
137 .init.smpalt : {
138 __smpalt_begin = .;
139 *(.alt.smp.init)
140 __smpalt_end = .;
141 }
142#endif
143 .init.pv_table : {
144 __pv_table_begin = .;
145 *(.pv_table)
146 __pv_table_end = .;
147 }
148 .init.data : {
149#ifndef CONFIG_XIP_KERNEL
150 INIT_DATA
151#endif
152 INIT_SETUP(16)
153 INIT_CALLS
154 CON_INITCALL
155 SECURITY_INITCALL
156 INIT_RAM_FS
157 }
158#ifndef CONFIG_XIP_KERNEL
159 .exit.data : {
160 ARM_EXIT_KEEP(EXIT_DATA)
161 }
162#endif
163
164 PERCPU_SECTION(32)
165
155#ifdef CONFIG_XIP_KERNEL 166#ifdef CONFIG_XIP_KERNEL
156 __data_loc = ALIGN(4); /* location in binary */ 167 __data_loc = ALIGN(4); /* location in binary */
157 . = PAGE_OFFSET + TEXT_OFFSET; 168 . = PAGE_OFFSET + TEXT_OFFSET;
158#else 169#else
170 __init_end = .;
159 . = ALIGN(THREAD_SIZE); 171 . = ALIGN(THREAD_SIZE);
160 __data_loc = .; 172 __data_loc = .;
161#endif 173#endif
@@ -270,12 +282,6 @@ SECTIONS
270 282
271 /* Default discards */ 283 /* Default discards */
272 DISCARDS 284 DISCARDS
273
274#ifndef CONFIG_SMP_ON_UP
275 /DISCARD/ : {
276 *(.alt.smp.init)
277 }
278#endif
279} 285}
280 286
281/* 287/*
diff --git a/arch/arm/mach-bcmring/include/mach/entry-macro.S b/arch/arm/mach-bcmring/include/mach/entry-macro.S
index 7d393ca010ac..94c950d783ba 100644
--- a/arch/arm/mach-bcmring/include/mach/entry-macro.S
+++ b/arch/arm/mach-bcmring/include/mach/entry-macro.S
@@ -80,7 +80,3 @@
80 80
81 .macro arch_ret_to_user, tmp1, tmp2 81 .macro arch_ret_to_user, tmp1, tmp2
82 .endm 82 .endm
83
84 .macro irq_prio_table
85 .endm
86
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index c67f684ee3e5..09a87e61ffcf 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -520,7 +520,7 @@ fail:
520 */ 520 */
521 if (have_imager()) { 521 if (have_imager()) {
522 label = "HD imager"; 522 label = "HD imager";
523 mux |= 1; 523 mux |= 2;
524 524
525 /* externally mux MMC1/ENET/AIC33 to imager */ 525 /* externally mux MMC1/ENET/AIC33 to imager */
526 mux |= BIT(6) | BIT(5) | BIT(3); 526 mux |= BIT(6) | BIT(5) | BIT(3);
@@ -540,7 +540,7 @@ fail:
540 resets &= ~BIT(1); 540 resets &= ~BIT(1);
541 541
542 if (have_tvp7002()) { 542 if (have_tvp7002()) {
543 mux |= 2; 543 mux |= 1;
544 resets &= ~BIT(2); 544 resets &= ~BIT(2);
545 label = "tvp7002 HD"; 545 label = "tvp7002 HD";
546 } else { 546 } else {
diff --git a/arch/arm/mach-davinci/gpio.c b/arch/arm/mach-davinci/gpio.c
index e7221398e5af..cafbe13a82a5 100644
--- a/arch/arm/mach-davinci/gpio.c
+++ b/arch/arm/mach-davinci/gpio.c
@@ -254,8 +254,10 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
254{ 254{
255 struct davinci_gpio_regs __iomem *g; 255 struct davinci_gpio_regs __iomem *g;
256 u32 mask = 0xffff; 256 u32 mask = 0xffff;
257 struct davinci_gpio_controller *d;
257 258
258 g = (__force struct davinci_gpio_regs __iomem *) irq_desc_get_handler_data(desc); 259 d = (struct davinci_gpio_controller *)irq_desc_get_handler_data(desc);
260 g = (struct davinci_gpio_regs __iomem *)d->regs;
259 261
260 /* we only care about one bank */ 262 /* we only care about one bank */
261 if (irq & 1) 263 if (irq & 1)
@@ -274,11 +276,14 @@ gpio_irq_handler(unsigned irq, struct irq_desc *desc)
274 if (!status) 276 if (!status)
275 break; 277 break;
276 __raw_writel(status, &g->intstat); 278 __raw_writel(status, &g->intstat);
277 if (irq & 1)
278 status >>= 16;
279 279
280 /* now demux them to the right lowlevel handler */ 280 /* now demux them to the right lowlevel handler */
281 n = (int)irq_get_handler_data(irq); 281 n = d->irq_base;
282 if (irq & 1) {
283 n += 16;
284 status >>= 16;
285 }
286
282 while (status) { 287 while (status) {
283 res = ffs(status); 288 res = ffs(status);
284 n += res; 289 n += res;
@@ -424,7 +429,13 @@ static int __init davinci_gpio_irq_setup(void)
424 429
425 /* set up all irqs in this bank */ 430 /* set up all irqs in this bank */
426 irq_set_chained_handler(bank_irq, gpio_irq_handler); 431 irq_set_chained_handler(bank_irq, gpio_irq_handler);
427 irq_set_handler_data(bank_irq, (__force void *)g); 432
433 /*
434 * Each chip handles 32 gpios, and each irq bank consists of 16
435 * gpio irqs. Pass the irq bank's corresponding controller to
436 * the chained irq handler.
437 */
438 irq_set_handler_data(bank_irq, &chips[gpio / 32]);
428 439
429 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) { 440 for (i = 0; i < 16 && gpio < ngpio; i++, irq++, gpio++) {
430 irq_set_chip(irq, &gpio_irqchip); 441 irq_set_chip(irq, &gpio_irqchip);
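In the gpio.c rework above, the chained handler no longer derives the target IRQ from per-IRQ handler data; it receives the whole controller and computes the IRQ base itself. A standalone model of that flow, with the kernel IRQ APIs replaced by a small table (the ffs() demux loop and the odd-bank offset mirror the hunk; the numbers are illustrative):

#include <stdio.h>

struct davinci_gpio_controller {
	unsigned int irq_base;	/* first Linux IRQ of this 32-gpio chip */
	void *regs;		/* register base, unused in this sketch */
};

/* stand-in for irq_set_handler_data()/irq_desc_get_handler_data() */
static void *handler_data[8];

static void gpio_irq_handler(unsigned int bank_irq, unsigned int status)
{
	struct davinci_gpio_controller *d = handler_data[bank_irq];
	unsigned int n = d->irq_base;

	/* odd bank IRQs serve the upper 16 gpios of the chip */
	if (bank_irq & 1)
		n += 16;

	/* demux pending bits to per-gpio IRQ numbers, as in the hunk */
	while (status) {
		int res = __builtin_ffs(status);
		n += res;
		status >>= res;
		printf("demux to linux irq %u\n", n - 1);
	}
}

int main(void)
{
	struct davinci_gpio_controller chip0 = { .irq_base = 100 };

	handler_data[5] = &chip0;	/* odd bank IRQ: gpios 16..31 */
	gpio_irq_handler(5, 0x5);	/* gpios 16 and 18 pending */
	return 0;
}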
diff --git a/arch/arm/mach-davinci/include/mach/entry-macro.S b/arch/arm/mach-davinci/include/mach/entry-macro.S
index fbdebc7cb409..e14c0dc0e12c 100644
--- a/arch/arm/mach-davinci/include/mach/entry-macro.S
+++ b/arch/arm/mach-davinci/include/mach/entry-macro.S
@@ -46,6 +46,3 @@
46#endif 46#endif
471002: 471002:
48 .endm 48 .endm
49
50 .macro irq_prio_table
51 .endm
diff --git a/arch/arm/mach-davinci/irq.c b/arch/arm/mach-davinci/irq.c
index bfe68ec4e1a6..952dc126c390 100644
--- a/arch/arm/mach-davinci/irq.c
+++ b/arch/arm/mach-davinci/irq.c
@@ -52,8 +52,14 @@ davinci_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
52 struct irq_chip_type *ct; 52 struct irq_chip_type *ct;
53 53
54 gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq); 54 gc = irq_alloc_generic_chip("AINTC", 1, irq_start, base, handle_edge_irq);
55 if (!gc) {
56 pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
57 __func__, irq_start);
58 return;
59 }
60
55 ct = gc->chip_types; 61 ct = gc->chip_types;
56 ct->chip.irq_ack = irq_gc_ack; 62 ct->chip.irq_ack = irq_gc_ack_set_bit;
57 ct->chip.irq_mask = irq_gc_mask_clr_bit; 63 ct->chip.irq_mask = irq_gc_mask_clr_bit;
58 ct->chip.irq_unmask = irq_gc_mask_set_bit; 64 ct->chip.irq_unmask = irq_gc_mask_set_bit;
59 65
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 1d4b65fd673e..6659a0d137a3 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -251,9 +251,9 @@ static void ep93xx_uart_set_mctrl(struct amba_device *dev,
251 unsigned int mcr; 251 unsigned int mcr;
252 252
253 mcr = 0; 253 mcr = 0;
254 if (!(mctrl & TIOCM_RTS)) 254 if (mctrl & TIOCM_RTS)
255 mcr |= 2; 255 mcr |= 2;
256 if (!(mctrl & TIOCM_DTR)) 256 if (mctrl & TIOCM_DTR)
257 mcr |= 1; 257 mcr |= 1;
258 258
259 __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET); 259 __raw_writel(mcr, base + EP93XX_UART_MCR_OFFSET);
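The ep93xx change above un-inverts the modem-control sense: previously a requested RTS/DTR cleared the corresponding MCR bit. A tiny standalone check of the corrected mapping (the TIOCM_* values match the Linux termios definitions; the harness is illustrative):

#include <stdio.h>

#define TIOCM_DTR 0x002	/* values as defined by Linux termios */
#define TIOCM_RTS 0x004

/* Corrected sense from the hunk: a set TIOCM bit sets the MCR bit. */
static unsigned int mctrl_to_mcr(unsigned int mctrl)
{
	unsigned int mcr = 0;

	if (mctrl & TIOCM_RTS)
		mcr |= 2;	/* MCR bit 1: request-to-send */
	if (mctrl & TIOCM_DTR)
		mcr |= 1;	/* MCR bit 0: data-terminal-ready */
	return mcr;
}

int main(void)
{
	/* asserting both lines must now set both MCR bits, not clear them */
	printf("mcr = %u\n", mctrl_to_mcr(TIOCM_RTS | TIOCM_DTR));
	return 0;
}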
diff --git a/arch/arm/mach-exynos4/cpu.c b/arch/arm/mach-exynos4/cpu.c
index 9babe4473e88..bfd621460abf 100644
--- a/arch/arm/mach-exynos4/cpu.c
+++ b/arch/arm/mach-exynos4/cpu.c
@@ -23,6 +23,7 @@
23#include <plat/sdhci.h> 23#include <plat/sdhci.h>
24#include <plat/devs.h> 24#include <plat/devs.h>
25#include <plat/fimc-core.h> 25#include <plat/fimc-core.h>
26#include <plat/iic-core.h>
26 27
27#include <mach/regs-irq.h> 28#include <mach/regs-irq.h>
28 29
@@ -132,6 +133,11 @@ void __init exynos4_map_io(void)
132 s3c_fimc_setname(1, "exynos4-fimc"); 133 s3c_fimc_setname(1, "exynos4-fimc");
133 s3c_fimc_setname(2, "exynos4-fimc"); 134 s3c_fimc_setname(2, "exynos4-fimc");
134 s3c_fimc_setname(3, "exynos4-fimc"); 135 s3c_fimc_setname(3, "exynos4-fimc");
136
137 /* The I2C bus controllers are directly compatible with s3c2440 */
138 s3c_i2c0_setname("s3c2440-i2c");
139 s3c_i2c1_setname("s3c2440-i2c");
140 s3c_i2c2_setname("s3c2440-i2c");
135} 141}
136 142
137void __init exynos4_init_clocks(int xtal) 143void __init exynos4_init_clocks(int xtal)
diff --git a/arch/arm/mach-exynos4/dev-audio.c b/arch/arm/mach-exynos4/dev-audio.c
index 1eed5f9f7bd3..983069a53239 100644
--- a/arch/arm/mach-exynos4/dev-audio.c
+++ b/arch/arm/mach-exynos4/dev-audio.c
@@ -330,7 +330,7 @@ struct platform_device exynos4_device_ac97 = {
330 330
331static int exynos4_spdif_cfg_gpio(struct platform_device *pdev) 331static int exynos4_spdif_cfg_gpio(struct platform_device *pdev)
332{ 332{
333 s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(3)); 333 s3c_gpio_cfgpin_range(EXYNOS4_GPC1(0), 2, S3C_GPIO_SFN(4));
334 334
335 return 0; 335 return 0;
336} 336}
diff --git a/arch/arm/mach-exynos4/headsmp.S b/arch/arm/mach-exynos4/headsmp.S
index 6c6cfc50c46b..3cdeb3647542 100644
--- a/arch/arm/mach-exynos4/headsmp.S
+++ b/arch/arm/mach-exynos4/headsmp.S
@@ -13,7 +13,7 @@
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <linux/init.h> 14#include <linux/init.h>
15 15
16 __INIT 16 __CPUINIT
17 17
18/* 18/*
19 * exynos4 specific entry point for secondary CPUs. This provides 19 * exynos4 specific entry point for secondary CPUs. This provides
diff --git a/arch/arm/mach-exynos4/mach-smdkv310.c b/arch/arm/mach-exynos4/mach-smdkv310.c
index 152676471b67..edd814110da8 100644
--- a/arch/arm/mach-exynos4/mach-smdkv310.c
+++ b/arch/arm/mach-exynos4/mach-smdkv310.c
@@ -78,9 +78,7 @@ static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
78}; 78};
79 79
80static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = { 80static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
81 .cd_type = S3C_SDHCI_CD_GPIO, 81 .cd_type = S3C_SDHCI_CD_INTERNAL,
82 .ext_cd_gpio = EXYNOS4_GPK0(2),
83 .ext_cd_gpio_invert = 1,
84 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 82 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
85#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT 83#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
86 .max_width = 8, 84 .max_width = 8,
@@ -96,9 +94,7 @@ static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
96}; 94};
97 95
98static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = { 96static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
99 .cd_type = S3C_SDHCI_CD_GPIO, 97 .cd_type = S3C_SDHCI_CD_INTERNAL,
100 .ext_cd_gpio = EXYNOS4_GPK2(2),
101 .ext_cd_gpio_invert = 1,
102 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL, 98 .clk_type = S3C_SDHCI_CLK_DIV_EXTERNAL,
103#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT 99#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
104 .max_width = 8, 100 .max_width = 8,
diff --git a/arch/arm/mach-exynos4/platsmp.c b/arch/arm/mach-exynos4/platsmp.c
index c5e65a02be8d..b68d5bdf04cf 100644
--- a/arch/arm/mach-exynos4/platsmp.c
+++ b/arch/arm/mach-exynos4/platsmp.c
@@ -154,14 +154,6 @@ void __init smp_init_cpus(void)
154 154
155void __init platform_smp_prepare_cpus(unsigned int max_cpus) 155void __init platform_smp_prepare_cpus(unsigned int max_cpus)
156{ 156{
157 int i;
158
159 /*
160 * Initialise the present map, which describes the set of CPUs
161 * actually populated at the present time.
162 */
163 for (i = 0; i < max_cpus; i++)
164 set_cpu_present(i, true);
165 157
166 scu_enable(scu_base_addr()); 158 scu_enable(scu_base_addr());
167 159
diff --git a/arch/arm/mach-exynos4/pm.c b/arch/arm/mach-exynos4/pm.c
index 8755ca8dd48d..533c28f758ca 100644
--- a/arch/arm/mach-exynos4/pm.c
+++ b/arch/arm/mach-exynos4/pm.c
@@ -280,7 +280,7 @@ static struct sleep_save exynos4_l2cc_save[] = {
280 SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL), 280 SAVE_ITEM(S5P_VA_L2CC + L2X0_AUX_CTRL),
281}; 281};
282 282
283void exynos4_cpu_suspend(void) 283static int exynos4_cpu_suspend(unsigned long arg)
284{ 284{
285 unsigned long tmp; 285 unsigned long tmp;
286 unsigned long mask = 0xFFFFFFFF; 286 unsigned long mask = 0xFFFFFFFF;
diff --git a/arch/arm/mach-exynos4/sleep.S b/arch/arm/mach-exynos4/sleep.S
index 6b62425417a6..0984078f1eba 100644
--- a/arch/arm/mach-exynos4/sleep.S
+++ b/arch/arm/mach-exynos4/sleep.S
@@ -33,28 +33,6 @@
33 .text 33 .text
34 34
35 /* 35 /*
36 * s3c_cpu_save
37 *
38 * entry:
39 * r1 = v:p offset
40 */
41
42ENTRY(s3c_cpu_save)
43
44 stmfd sp!, { r3 - r12, lr }
45 ldr r3, =resume_with_mmu
46 bl cpu_suspend
47
48 ldr r0, =pm_cpu_sleep
49 ldr r0, [ r0 ]
50 mov pc, r0
51
52resume_with_mmu:
53 ldmfd sp!, { r3 - r12, pc }
54
55 .ltorg
56
57 /*
58 * sleep magic, to allow the bootloader to check for an valid 36 * sleep magic, to allow the bootloader to check for an valid
59 * image to resume to. Must be the first word before the 37 * image to resume to. Must be the first word before the
60 * s3c_cpu_resume entry. 38 * s3c_cpu_resume entry.
diff --git a/arch/arm/mach-h720x/include/mach/entry-macro.S b/arch/arm/mach-h720x/include/mach/entry-macro.S
index 6d3b917c4a18..c3948e5ba4a0 100644
--- a/arch/arm/mach-h720x/include/mach/entry-macro.S
+++ b/arch/arm/mach-h720x/include/mach/entry-macro.S
@@ -57,9 +57,6 @@
57 tst \irqstat, #1 @ bit 0 should be set 57 tst \irqstat, #1 @ bit 0 should be set
58 .endm 58 .endm
59 59
60 .macro irq_prio_table
61 .endm
62
63#else 60#else
 64#error hynix processor selection mismatch 61#error hynix processor selection mismatch
65#endif 62#endif
diff --git a/arch/arm/mach-ixp4xx/common-pci.c b/arch/arm/mach-ixp4xx/common-pci.c
index e9a589395723..e2e98bbb6413 100644
--- a/arch/arm/mach-ixp4xx/common-pci.c
+++ b/arch/arm/mach-ixp4xx/common-pci.c
@@ -316,6 +316,11 @@ static int abort_handler(unsigned long addr, unsigned int fsr, struct pt_regs *r
316} 316}
317 317
318 318
319static int ixp4xx_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
320{
321 return (dma_addr + size) >= SZ_64M;
322}
323
319/* 324/*
320 * Setup DMA mask to 64MB on PCI devices. Ignore all other devices. 325 * Setup DMA mask to 64MB on PCI devices. Ignore all other devices.
321 */ 326 */
@@ -324,7 +329,7 @@ static int ixp4xx_pci_platform_notify(struct device *dev)
324 if(dev->bus == &pci_bus_type) { 329 if(dev->bus == &pci_bus_type) {
325 *dev->dma_mask = SZ_64M - 1; 330 *dev->dma_mask = SZ_64M - 1;
326 dev->coherent_dma_mask = SZ_64M - 1; 331 dev->coherent_dma_mask = SZ_64M - 1;
327 dmabounce_register_dev(dev, 2048, 4096); 332 dmabounce_register_dev(dev, 2048, 4096, ixp4xx_needs_bounce);
328 } 333 }
329 return 0; 334 return 0;
330} 335}
@@ -337,11 +342,6 @@ static int ixp4xx_pci_platform_notify_remove(struct device *dev)
337 return 0; 342 return 0;
338} 343}
339 344
340int dma_needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
341{
342 return (dev->bus == &pci_bus_type ) && ((dma_addr + size) >= SZ_64M);
343}
344
345void __init ixp4xx_pci_preinit(void) 345void __init ixp4xx_pci_preinit(void)
346{ 346{
347 unsigned long cpuid = read_cpuid_id(); 347 unsigned long cpuid = read_cpuid_id();
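The ixp4xx hunk above replaces the global dma_needs_bounce() hook with a per-device callback passed as the fourth argument to dmabounce_register_dev(). A minimal standalone sketch of that callback's contract (SZ_64M and the threshold test come from the hunk; the real callback also receives the struct device *, dropped here so the sketch compiles on its own):

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SZ_64M (64u << 20)

typedef uint32_t dma_addr_t;

/*
 * Per-device bounce decision, as registered above: any buffer that
 * ends at or beyond the 64M PCI window must go through a bounce buffer.
 */
static int ixp4xx_needs_bounce(dma_addr_t dma_addr, size_t size)
{
	return (dma_addr + size) >= SZ_64M;
}

int main(void)
{
	printf("%d\n", ixp4xx_needs_bounce(SZ_64M - 4096, 4096)); /* 1 */
	printf("%d\n", ixp4xx_needs_bounce(0x1000, 512));         /* 0 */
	return 0;
}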
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 74ed81a3cb1a..07772575d7ab 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -419,14 +419,20 @@ static void notrace ixp4xx_update_sched_clock(void)
419/* 419/*
420 * clocksource 420 * clocksource
421 */ 421 */
422
423static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
424{
425 return *IXP4XX_OSTS;
426}
427
422unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ; 428unsigned long ixp4xx_timer_freq = IXP4XX_TIMER_FREQ;
423EXPORT_SYMBOL(ixp4xx_timer_freq); 429EXPORT_SYMBOL(ixp4xx_timer_freq);
424static void __init ixp4xx_clocksource_init(void) 430static void __init ixp4xx_clocksource_init(void)
425{ 431{
426 init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq); 432 init_sched_clock(&cd, ixp4xx_update_sched_clock, 32, ixp4xx_timer_freq);
427 433
428 clocksource_mmio_init(&IXP4XX_OSTS, "OSTS", ixp4xx_timer_freq, 200, 32, 434 clocksource_mmio_init(NULL, "OSTS", ixp4xx_timer_freq, 200, 32,
429 clocksource_mmio_readl_up); 435 ixp4xx_clocksource_read);
430} 436}
431 437
432/* 438/*
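Above, the generic clocksource_mmio_readl_up() reader is swapped for a driver-private read hook, because the OSTS timer register is reached through an absolute-address macro rather than a stored base pointer (hence the NULL base). A standalone sketch of that callback shape (cycle_t and the register are modeled locally; only the pattern mirrors the hunk):

#include <stdio.h>
#include <stdint.h>

typedef uint64_t cycle_t;	/* matches the kernel's 64-bit cycle type */

struct clocksource;	/* opaque: the hook only identifies the source */

/* stand-in for the memory-mapped OSTS register, which the real driver
 * reaches via an absolute-address macro */
static volatile uint32_t fake_osts;

/* private read hook: ignores the (NULL) base registered above */
static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
{
	(void)c;
	return fake_osts;
}

int main(void)
{
	fake_osts = 12345;	/* pretend the free-running timer ticked */
	printf("cycles = %llu\n",
	       (unsigned long long)ixp4xx_clocksource_read(NULL));
	return 0;
}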
diff --git a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
index 870227c96602..b725f6c93975 100644
--- a/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
+++ b/arch/arm/mach-lpc32xx/include/mach/entry-macro.S
@@ -41,7 +41,3 @@
41 rsb \irqnr, \irqnr, #31 41 rsb \irqnr, \irqnr, #31
42 teq \irqstat, #0 42 teq \irqstat, #0
43 .endm 43 .endm
44
45 .macro irq_prio_table
46 .endm
47
diff --git a/arch/arm/mach-mmp/pxa168.c b/arch/arm/mach-mmp/pxa168.c
index 72b4e7631583..ab9f999106c7 100644
--- a/arch/arm/mach-mmp/pxa168.c
+++ b/arch/arm/mach-mmp/pxa168.c
@@ -79,7 +79,7 @@ static APBC_CLK(ssp4, PXA168_SSP4, 4, 0);
79static APBC_CLK(ssp5, PXA168_SSP5, 4, 0); 79static APBC_CLK(ssp5, PXA168_SSP5, 4, 0);
80static APBC_CLK(keypad, PXA168_KPC, 0, 32000); 80static APBC_CLK(keypad, PXA168_KPC, 0, 32000);
81 81
82static APMU_CLK(nand, NAND, 0x01db, 208000000); 82static APMU_CLK(nand, NAND, 0x19b, 156000000);
83static APMU_CLK(lcd, LCD, 0x7f, 312000000); 83static APMU_CLK(lcd, LCD, 0x7f, 312000000);
84 84
85/* device and clock bindings */ 85/* device and clock bindings */
diff --git a/arch/arm/mach-mmp/pxa910.c b/arch/arm/mach-mmp/pxa910.c
index 8f92ccd26edf..1464607aa60d 100644
--- a/arch/arm/mach-mmp/pxa910.c
+++ b/arch/arm/mach-mmp/pxa910.c
@@ -110,7 +110,7 @@ static APBC_CLK(pwm2, PXA910_PWM2, 1, 13000000);
110static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000); 110static APBC_CLK(pwm3, PXA910_PWM3, 1, 13000000);
111static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000); 111static APBC_CLK(pwm4, PXA910_PWM4, 1, 13000000);
112 112
113static APMU_CLK(nand, NAND, 0x01db, 208000000); 113static APMU_CLK(nand, NAND, 0x19b, 156000000);
114static APMU_CLK(u2o, USB, 0x1b, 480000000); 114static APMU_CLK(u2o, USB, 0x1b, 480000000);
115 115
116/* device and clock bindings */ 116/* device and clock bindings */
diff --git a/arch/arm/mach-msm/platsmp.c b/arch/arm/mach-msm/platsmp.c
index 2034098cf015..315b9f365329 100644
--- a/arch/arm/mach-msm/platsmp.c
+++ b/arch/arm/mach-msm/platsmp.c
@@ -157,12 +157,4 @@ void __init smp_init_cpus(void)
157 157
158void __init platform_smp_prepare_cpus(unsigned int max_cpus) 158void __init platform_smp_prepare_cpus(unsigned int max_cpus)
159{ 159{
160 int i;
161
162 /*
163 * Initialise the present map, which describes the set of CPUs
164 * actually populated at the present time.
165 */
166 for (i = 0; i < max_cpus; i++)
167 set_cpu_present(i, true);
168} 160}
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index de88c9297b68..f49ce85d2448 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -215,7 +215,7 @@ static struct omap_kp_platform_data ams_delta_kp_data __initdata = {
215 .delay = 9, 215 .delay = 9,
216}; 216};
217 217
218static struct platform_device ams_delta_kp_device __initdata = { 218static struct platform_device ams_delta_kp_device = {
219 .name = "omap-keypad", 219 .name = "omap-keypad",
220 .id = -1, 220 .id = -1,
221 .dev = { 221 .dev = {
@@ -225,12 +225,12 @@ static struct platform_device ams_delta_kp_device __initdata = {
225 .resource = ams_delta_kp_resources, 225 .resource = ams_delta_kp_resources,
226}; 226};
227 227
228static struct platform_device ams_delta_lcd_device __initdata = { 228static struct platform_device ams_delta_lcd_device = {
229 .name = "lcd_ams_delta", 229 .name = "lcd_ams_delta",
230 .id = -1, 230 .id = -1,
231}; 231};
232 232
233static struct platform_device ams_delta_led_device __initdata = { 233static struct platform_device ams_delta_led_device = {
234 .name = "ams-delta-led", 234 .name = "ams-delta-led",
235 .id = -1 235 .id = -1
236}; 236};
@@ -267,7 +267,7 @@ static struct soc_camera_link ams_delta_iclink = {
267 .power = ams_delta_camera_power, 267 .power = ams_delta_camera_power,
268}; 268};
269 269
270static struct platform_device ams_delta_camera_device __initdata = { 270static struct platform_device ams_delta_camera_device = {
271 .name = "soc-camera-pdrv", 271 .name = "soc-camera-pdrv",
272 .id = 0, 272 .id = 0,
273 .dev = { 273 .dev = {
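The ams-delta devices above lose their __initdata marking because a registered struct platform_device must outlive the init phase: the driver core keeps referencing it after init memory is freed. A hedged standalone illustration of that lifetime rule (the registry stand-in is illustrative; the attribute itself is modeled as a comment):

#include <stdio.h>

struct platform_device {
	const char *name;
	int id;
};

/* stand-in for the driver core's device list: it keeps the pointer */
static struct platform_device *registered;

static void platform_device_register_stub(struct platform_device *pdev)
{
	registered = pdev;	/* no copy is made: pdev must stay valid */
}

/*
 * Must NOT be __initdata: the driver core dereferences this long after
 * the kernel has freed .init.data, which would leave a dangling pointer.
 */
static struct platform_device ams_delta_led_device = {
	.name	= "ams-delta-led",
	.id	= -1,
};

int main(void)
{
	platform_device_register_stub(&ams_delta_led_device);
	/* ... much later, e.g. at unbind time, the core still reads it */
	printf("bound: %s\n", registered->name);
	return 0;
}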
diff --git a/arch/arm/mach-omap1/gpio15xx.c b/arch/arm/mach-omap1/gpio15xx.c
index 04c4b04cf54e..364137c2042c 100644
--- a/arch/arm/mach-omap1/gpio15xx.c
+++ b/arch/arm/mach-omap1/gpio15xx.c
@@ -41,7 +41,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_mpu_gpio_config = {
41 .bank_stride = 1, 41 .bank_stride = 1,
42}; 42};
43 43
44static struct __initdata platform_device omap15xx_mpu_gpio = { 44static struct platform_device omap15xx_mpu_gpio = {
45 .name = "omap_gpio", 45 .name = "omap_gpio",
46 .id = 0, 46 .id = 0,
47 .dev = { 47 .dev = {
@@ -70,7 +70,7 @@ static struct __initdata omap_gpio_platform_data omap15xx_gpio_config = {
70 .bank_width = 16, 70 .bank_width = 16,
71}; 71};
72 72
73static struct __initdata platform_device omap15xx_gpio = { 73static struct platform_device omap15xx_gpio = {
74 .name = "omap_gpio", 74 .name = "omap_gpio",
75 .id = 1, 75 .id = 1,
76 .dev = { 76 .dev = {
diff --git a/arch/arm/mach-omap1/gpio16xx.c b/arch/arm/mach-omap1/gpio16xx.c
index 5dd0d4c82b24..293a246e2824 100644
--- a/arch/arm/mach-omap1/gpio16xx.c
+++ b/arch/arm/mach-omap1/gpio16xx.c
@@ -44,7 +44,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_mpu_gpio_config = {
44 .bank_stride = 1, 44 .bank_stride = 1,
45}; 45};
46 46
47static struct __initdata platform_device omap16xx_mpu_gpio = { 47static struct platform_device omap16xx_mpu_gpio = {
48 .name = "omap_gpio", 48 .name = "omap_gpio",
49 .id = 0, 49 .id = 0,
50 .dev = { 50 .dev = {
@@ -73,7 +73,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio1_config = {
73 .bank_width = 16, 73 .bank_width = 16,
74}; 74};
75 75
76static struct __initdata platform_device omap16xx_gpio1 = { 76static struct platform_device omap16xx_gpio1 = {
77 .name = "omap_gpio", 77 .name = "omap_gpio",
78 .id = 1, 78 .id = 1,
79 .dev = { 79 .dev = {
@@ -102,7 +102,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio2_config = {
102 .bank_width = 16, 102 .bank_width = 16,
103}; 103};
104 104
105static struct __initdata platform_device omap16xx_gpio2 = { 105static struct platform_device omap16xx_gpio2 = {
106 .name = "omap_gpio", 106 .name = "omap_gpio",
107 .id = 2, 107 .id = 2,
108 .dev = { 108 .dev = {
@@ -131,7 +131,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio3_config = {
131 .bank_width = 16, 131 .bank_width = 16,
132}; 132};
133 133
134static struct __initdata platform_device omap16xx_gpio3 = { 134static struct platform_device omap16xx_gpio3 = {
135 .name = "omap_gpio", 135 .name = "omap_gpio",
136 .id = 3, 136 .id = 3,
137 .dev = { 137 .dev = {
@@ -160,7 +160,7 @@ static struct __initdata omap_gpio_platform_data omap16xx_gpio4_config = {
160 .bank_width = 16, 160 .bank_width = 16,
161}; 161};
162 162
163static struct __initdata platform_device omap16xx_gpio4 = { 163static struct platform_device omap16xx_gpio4 = {
164 .name = "omap_gpio", 164 .name = "omap_gpio",
165 .id = 4, 165 .id = 4,
166 .dev = { 166 .dev = {
diff --git a/arch/arm/mach-omap1/gpio7xx.c b/arch/arm/mach-omap1/gpio7xx.c
index 1204c8b871af..c6ad248d63a6 100644
--- a/arch/arm/mach-omap1/gpio7xx.c
+++ b/arch/arm/mach-omap1/gpio7xx.c
@@ -46,7 +46,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_mpu_gpio_config = {
46 .bank_stride = 2, 46 .bank_stride = 2,
47}; 47};
48 48
49static struct __initdata platform_device omap7xx_mpu_gpio = { 49static struct platform_device omap7xx_mpu_gpio = {
50 .name = "omap_gpio", 50 .name = "omap_gpio",
51 .id = 0, 51 .id = 0,
52 .dev = { 52 .dev = {
@@ -75,7 +75,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio1_config = {
75 .bank_width = 32, 75 .bank_width = 32,
76}; 76};
77 77
78static struct __initdata platform_device omap7xx_gpio1 = { 78static struct platform_device omap7xx_gpio1 = {
79 .name = "omap_gpio", 79 .name = "omap_gpio",
80 .id = 1, 80 .id = 1,
81 .dev = { 81 .dev = {
@@ -104,7 +104,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio2_config = {
104 .bank_width = 32, 104 .bank_width = 32,
105}; 105};
106 106
107static struct __initdata platform_device omap7xx_gpio2 = { 107static struct platform_device omap7xx_gpio2 = {
108 .name = "omap_gpio", 108 .name = "omap_gpio",
109 .id = 2, 109 .id = 2,
110 .dev = { 110 .dev = {
@@ -133,7 +133,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio3_config = {
133 .bank_width = 32, 133 .bank_width = 32,
134}; 134};
135 135
136static struct __initdata platform_device omap7xx_gpio3 = { 136static struct platform_device omap7xx_gpio3 = {
137 .name = "omap_gpio", 137 .name = "omap_gpio",
138 .id = 3, 138 .id = 3,
139 .dev = { 139 .dev = {
@@ -162,7 +162,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio4_config = {
162 .bank_width = 32, 162 .bank_width = 32,
163}; 163};
164 164
165static struct __initdata platform_device omap7xx_gpio4 = { 165static struct platform_device omap7xx_gpio4 = {
166 .name = "omap_gpio", 166 .name = "omap_gpio",
167 .id = 4, 167 .id = 4,
168 .dev = { 168 .dev = {
@@ -191,7 +191,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio5_config = {
191 .bank_width = 32, 191 .bank_width = 32,
192}; 192};
193 193
194static struct __initdata platform_device omap7xx_gpio5 = { 194static struct platform_device omap7xx_gpio5 = {
195 .name = "omap_gpio", 195 .name = "omap_gpio",
196 .id = 5, 196 .id = 5,
197 .dev = { 197 .dev = {
@@ -220,7 +220,7 @@ static struct __initdata omap_gpio_platform_data omap7xx_gpio6_config = {
220 .bank_width = 32, 220 .bank_width = 32,
221}; 221};
222 222
223static struct __initdata platform_device omap7xx_gpio6 = { 223static struct platform_device omap7xx_gpio6 = {
224 .name = "omap_gpio", 224 .name = "omap_gpio",
225 .id = 6, 225 .id = 6,
226 .dev = { 226 .dev = {
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index 990366726c58..88bd6f7705f0 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -558,7 +558,7 @@ static struct radio_si4713_platform_data rx51_si4713_data __initdata_or_module =
558 .subdev_board_info = &rx51_si4713_board_info, 558 .subdev_board_info = &rx51_si4713_board_info,
559}; 559};
560 560
561static struct platform_device rx51_si4713_dev __initdata_or_module = { 561static struct platform_device rx51_si4713_dev = {
562 .name = "radio-si4713", 562 .name = "radio-si4713",
563 .id = -1, 563 .id = -1,
564 .dev = { 564 .dev = {
diff --git a/arch/arm/mach-omap2/control.c b/arch/arm/mach-omap2/control.c
index da53ba3917ca..aab884fecc55 100644
--- a/arch/arm/mach-omap2/control.c
+++ b/arch/arm/mach-omap2/control.c
@@ -286,14 +286,15 @@ void omap3_save_scratchpad_contents(void)
286 scratchpad_contents.boot_config_ptr = 0x0; 286 scratchpad_contents.boot_config_ptr = 0x0;
287 if (cpu_is_omap3630()) 287 if (cpu_is_omap3630())
288 scratchpad_contents.public_restore_ptr = 288 scratchpad_contents.public_restore_ptr =
289 virt_to_phys(get_omap3630_restore_pointer()); 289 virt_to_phys(omap3_restore_3630);
290 else if (omap_rev() != OMAP3430_REV_ES3_0 && 290 else if (omap_rev() != OMAP3430_REV_ES3_0 &&
291 omap_rev() != OMAP3430_REV_ES3_1) 291 omap_rev() != OMAP3430_REV_ES3_1)
292 scratchpad_contents.public_restore_ptr = 292 scratchpad_contents.public_restore_ptr =
293 virt_to_phys(get_restore_pointer()); 293 virt_to_phys(omap3_restore);
294 else 294 else
295 scratchpad_contents.public_restore_ptr = 295 scratchpad_contents.public_restore_ptr =
296 virt_to_phys(get_es3_restore_pointer()); 296 virt_to_phys(omap3_restore_es3);
297
297 if (omap_type() == OMAP2_DEVICE_TYPE_GP) 298 if (omap_type() == OMAP2_DEVICE_TYPE_GP)
298 scratchpad_contents.secure_ram_restore_ptr = 0x0; 299 scratchpad_contents.secure_ram_restore_ptr = 0x0;
299 else 300 else
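Above, the indirection through the get_*_restore_pointer() helpers is dropped: the restore entry points are now ordinary function symbols whose address can be taken directly. A hedged sketch of the resulting shape (names from the hunk; the virt_to_phys stand-in is illustrative):

#include <stdio.h>
#include <stdint.h>

/* stand-ins for the assembly entry points now declared in control.h */
void omap3_restore(void) { }
void omap3_restore_3630(void) { }

/* illustrative stand-in for the kernel's virt_to_phys() */
static uintptr_t virt_to_phys_stub(void (*fn)(void))
{
	return (uintptr_t)fn;	/* identity mapping, for the sketch only */
}

int main(void)
{
	/* the scratchpad entry now takes the symbol's address directly,
	 * with no get_*_restore_pointer() trampoline in between */
	printf("3xxx restore ptr = %#lx\n",
	       (unsigned long)virt_to_phys_stub(omap3_restore));
	printf("3630 restore ptr = %#lx\n",
	       (unsigned long)virt_to_phys_stub(omap3_restore_3630));
	return 0;
}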
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a016c8b59e00..d4ef75d5a382 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -386,9 +386,9 @@ extern void omap4_ctrl_pad_writel(u32 val, u16 offset);
386 386
387extern void omap3_save_scratchpad_contents(void); 387extern void omap3_save_scratchpad_contents(void);
388extern void omap3_clear_scratchpad_contents(void); 388extern void omap3_clear_scratchpad_contents(void);
389extern u32 *get_restore_pointer(void); 389extern void omap3_restore(void);
390extern u32 *get_es3_restore_pointer(void); 390extern void omap3_restore_es3(void);
391extern u32 *get_omap3630_restore_pointer(void); 391extern void omap3_restore_3630(void);
392extern u32 omap3_arm_context[128]; 392extern u32 omap3_arm_context[128];
393extern void omap3_control_save_context(void); 393extern void omap3_control_save_context(void);
394extern void omap3_control_restore_context(void); 394extern void omap3_control_restore_context(void);
diff --git a/arch/arm/mach-omap2/include/mach/entry-macro.S b/arch/arm/mach-omap2/include/mach/entry-macro.S
index a48690b90990..ceb8b7e593d7 100644
--- a/arch/arm/mach-omap2/include/mach/entry-macro.S
+++ b/arch/arm/mach-omap2/include/mach/entry-macro.S
@@ -165,6 +165,3 @@
165#endif 165#endif
166 166
167#endif /* MULTI_OMAP2 */ 167#endif /* MULTI_OMAP2 */
168
169 .macro irq_prio_table
170 .endm
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index ecfe93c4b585..ce65e9329c7b 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -125,14 +125,6 @@ void __init smp_init_cpus(void)
125 125
126void __init platform_smp_prepare_cpus(unsigned int max_cpus) 126void __init platform_smp_prepare_cpus(unsigned int max_cpus)
127{ 127{
128 int i;
129
130 /*
131 * Initialise the present map, which describes the set of CPUs
132 * actually populated at the present time.
133 */
134 for (i = 0; i < max_cpus; i++)
135 set_cpu_present(i, true);
136 128
137 /* 129 /*
138 * Initialise the SCU and wake up the secondary core using 130 * Initialise the SCU and wake up the secondary core using
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h
index 45bcfce77352..04ee56646126 100644
--- a/arch/arm/mach-omap2/pm.h
+++ b/arch/arm/mach-omap2/pm.h
@@ -88,18 +88,28 @@ extern int pm_dbg_regset_init(int reg_set);
88#define pm_dbg_regset_init(reg_set) do {} while (0); 88#define pm_dbg_regset_init(reg_set) do {} while (0);
89#endif /* CONFIG_PM_DEBUG */ 89#endif /* CONFIG_PM_DEBUG */
90 90
91/* 24xx */
91extern void omap24xx_idle_loop_suspend(void); 92extern void omap24xx_idle_loop_suspend(void);
93extern unsigned int omap24xx_idle_loop_suspend_sz;
92 94
93extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl, 95extern void omap24xx_cpu_suspend(u32 dll_ctrl, void __iomem *sdrc_dlla_ctrl,
94 void __iomem *sdrc_power); 96 void __iomem *sdrc_power);
95extern void omap34xx_cpu_suspend(u32 *addr, int save_state); 97extern unsigned int omap24xx_cpu_suspend_sz;
96extern int save_secure_ram_context(u32 *addr);
97extern void omap3_save_scratchpad_contents(void);
98 98
99extern unsigned int omap24xx_idle_loop_suspend_sz; 99/* 3xxx */
100extern void omap34xx_cpu_suspend(int save_state);
101
102/* omap3_do_wfi function pointer and size, for copy to SRAM */
103extern void omap3_do_wfi(void);
104extern unsigned int omap3_do_wfi_sz;
105/* ... and its pointer from SRAM after copy */
106extern void (*omap3_do_wfi_sram)(void);
107
108/* save_secure_ram_context function pointer and size, for copy to SRAM */
109extern int save_secure_ram_context(u32 *addr);
100extern unsigned int save_secure_ram_context_sz; 110extern unsigned int save_secure_ram_context_sz;
101extern unsigned int omap24xx_cpu_suspend_sz; 111
102extern unsigned int omap34xx_cpu_suspend_sz; 112extern void omap3_save_scratchpad_contents(void);
103 113
104#define PM_RTA_ERRATUM_i608 (1 << 0) 114#define PM_RTA_ERRATUM_i608 (1 << 0)
105#define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1) 115#define PM_SDRC_WAKEUP_ERRATUM_i583 (1 << 1)
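The reworked pm.h splits each SRAM-resident routine into three pieces: the SDRAM master copy (omap3_do_wfi), its size symbol (omap3_do_wfi_sz), and the function pointer that will hold the SRAM copy (omap3_do_wfi_sram). A standalone sketch of that copy-then-call-through-pointer pattern (the stub below stands in for omap_sram_push(); runtime code copying is not expressible in portable C, so only the shape is shown):

#include <stdio.h>

typedef void (*wfi_fn)(void);

/* the SDRAM master copy of the routine; its size would come from the
 * omap3_do_wfi_sz symbol emitted next to it in assembly */
static void do_wfi_sdram(void)
{
	puts("wfi (would execute from SRAM)");
}

/* pointer the PM code calls once the routine has been pushed to SRAM */
static wfi_fn do_wfi_sram;

/*
 * Stand-in for omap_sram_push(): the real helper copies the function
 * body into SRAM and returns the SRAM address.  This stub returns the
 * original pointer just to demonstrate the calling convention.
 */
static wfi_fn sram_push_stub(wfi_fn fn, unsigned int size)
{
	(void)size;
	return fn;
}

int main(void)
{
	do_wfi_sram = sram_push_stub(do_wfi_sdram, 0 /* omap3_do_wfi_sz */);
	do_wfi_sram();	/* PM code always runs the SRAM copy */
	return 0;
}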
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index c155c9d1c82c..b77d82665abb 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -31,6 +31,8 @@
31#include <linux/console.h> 31#include <linux/console.h>
32#include <trace/events/power.h> 32#include <trace/events/power.h>
33 33
34#include <asm/suspend.h>
35
34#include <plat/sram.h> 36#include <plat/sram.h>
35#include "clockdomain.h" 37#include "clockdomain.h"
36#include "powerdomain.h" 38#include "powerdomain.h"
@@ -40,8 +42,6 @@
40#include <plat/gpmc.h> 42#include <plat/gpmc.h>
41#include <plat/dma.h> 43#include <plat/dma.h>
42 44
43#include <asm/tlbflush.h>
44
45#include "cm2xxx_3xxx.h" 45#include "cm2xxx_3xxx.h"
46#include "cm-regbits-34xx.h" 46#include "cm-regbits-34xx.h"
47#include "prm-regbits-34xx.h" 47#include "prm-regbits-34xx.h"
@@ -64,11 +64,6 @@ static inline bool is_suspending(void)
64} 64}
65#endif 65#endif
66 66
67/* Scratchpad offsets */
68#define OMAP343X_TABLE_ADDRESS_OFFSET 0xc4
69#define OMAP343X_TABLE_VALUE_OFFSET 0xc0
70#define OMAP343X_CONTROL_REG_VALUE_OFFSET 0xc8
71
72/* pm34xx errata defined in pm.h */ 67/* pm34xx errata defined in pm.h */
73u16 pm34xx_errata; 68u16 pm34xx_errata;
74 69
@@ -83,9 +78,8 @@ struct power_state {
83 78
84static LIST_HEAD(pwrst_list); 79static LIST_HEAD(pwrst_list);
85 80
86static void (*_omap_sram_idle)(u32 *addr, int save_state);
87
88static int (*_omap_save_secure_sram)(u32 *addr); 81static int (*_omap_save_secure_sram)(u32 *addr);
82void (*omap3_do_wfi_sram)(void);
89 83
90static struct powerdomain *mpu_pwrdm, *neon_pwrdm; 84static struct powerdomain *mpu_pwrdm, *neon_pwrdm;
91static struct powerdomain *core_pwrdm, *per_pwrdm; 85static struct powerdomain *core_pwrdm, *per_pwrdm;
@@ -312,28 +306,25 @@ static irqreturn_t prcm_interrupt_handler (int irq, void *dev_id)
312 return IRQ_HANDLED; 306 return IRQ_HANDLED;
313} 307}
314 308
315/* Function to restore the table entry that was modified for enabling MMU */ 309static void omap34xx_save_context(u32 *save)
316static void restore_table_entry(void)
317{ 310{
318 void __iomem *scratchpad_address; 311 u32 val;
319 u32 previous_value, control_reg_value;
320 u32 *address;
321 312
322 scratchpad_address = OMAP2_L4_IO_ADDRESS(OMAP343X_SCRATCHPAD); 313 /* Read Auxiliary Control Register */
314 asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (val));
315 *save++ = 1;
316 *save++ = val;
323 317
324 /* Get address of entry that was modified */ 318 /* Read L2 AUX ctrl register */
325 address = (u32 *)__raw_readl(scratchpad_address + 319 asm("mrc p15, 1, %0, c9, c0, 2" : "=r" (val));
326 OMAP343X_TABLE_ADDRESS_OFFSET); 320 *save++ = 1;
327 /* Get the previous value which needs to be restored */ 321 *save++ = val;
328 previous_value = __raw_readl(scratchpad_address + 322}
329 OMAP343X_TABLE_VALUE_OFFSET); 323
330 address = __va(address); 324static int omap34xx_do_sram_idle(unsigned long save_state)
331 *address = previous_value; 325{
332 flush_tlb_all(); 326 omap34xx_cpu_suspend(save_state);
333 control_reg_value = __raw_readl(scratchpad_address 327 return 0;
334 + OMAP343X_CONTROL_REG_VALUE_OFFSET);
335 /* This will enable caches and prediction */
336 set_cr(control_reg_value);
337} 328}
338 329
339void omap_sram_idle(void) 330void omap_sram_idle(void)
@@ -352,9 +343,6 @@ void omap_sram_idle(void)
352 int core_prev_state, per_prev_state; 343 int core_prev_state, per_prev_state;
353 u32 sdrc_pwr = 0; 344 u32 sdrc_pwr = 0;
354 345
355 if (!_omap_sram_idle)
356 return;
357
358 pwrdm_clear_all_prev_pwrst(mpu_pwrdm); 346 pwrdm_clear_all_prev_pwrst(mpu_pwrdm);
359 pwrdm_clear_all_prev_pwrst(neon_pwrdm); 347 pwrdm_clear_all_prev_pwrst(neon_pwrdm);
360 pwrdm_clear_all_prev_pwrst(core_pwrdm); 348 pwrdm_clear_all_prev_pwrst(core_pwrdm);
@@ -432,12 +420,16 @@ void omap_sram_idle(void)
432 sdrc_pwr = sdrc_read_reg(SDRC_POWER); 420 sdrc_pwr = sdrc_read_reg(SDRC_POWER);
433 421
434 /* 422 /*
435 * omap3_arm_context is the location where ARM registers 423 * omap3_arm_context is the location where some ARM context
 436 * get saved. The restore path then reads from this 424 * gets saved. The rest is placed on the stack, and restored
437 * location and restores them back. 425 * from there before resuming.
438 */ 426 */
439 _omap_sram_idle(omap3_arm_context, save_state); 427 if (save_state)
440 cpu_init(); 428 omap34xx_save_context(omap3_arm_context);
429 if (save_state == 1 || save_state == 3)
430 cpu_suspend(save_state, omap34xx_do_sram_idle);
431 else
432 omap34xx_do_sram_idle(save_state);
441 433
442 /* Restore normal SDRC POWER settings */ 434 /* Restore normal SDRC POWER settings */
443 if (omap_rev() >= OMAP3430_REV_ES3_0 && 435 if (omap_rev() >= OMAP3430_REV_ES3_0 &&
@@ -445,10 +437,6 @@ void omap_sram_idle(void)
445 core_next_state == PWRDM_POWER_OFF) 437 core_next_state == PWRDM_POWER_OFF)
446 sdrc_write_reg(sdrc_pwr, SDRC_POWER); 438 sdrc_write_reg(sdrc_pwr, SDRC_POWER);
447 439
448 /* Restore table entry modified during MMU restoration */
449 if (pwrdm_read_prev_pwrst(mpu_pwrdm) == PWRDM_POWER_OFF)
450 restore_table_entry();
451
452 /* CORE */ 440 /* CORE */
453 if (core_next_state < PWRDM_POWER_ON) { 441 if (core_next_state < PWRDM_POWER_ON) {
454 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm); 442 core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
@@ -852,10 +840,17 @@ static int __init clkdms_setup(struct clockdomain *clkdm, void *unused)
852 return 0; 840 return 0;
853} 841}
854 842
843/*
844 * Push functions to SRAM
845 *
846 * The minimum set of functions is pushed to SRAM for execution:
847 * - omap3_do_wfi for erratum i581 WA,
848 * - save_secure_ram_context for security extensions.
849 */
855void omap_push_sram_idle(void) 850void omap_push_sram_idle(void)
856{ 851{
857 _omap_sram_idle = omap_sram_push(omap34xx_cpu_suspend, 852 omap3_do_wfi_sram = omap_sram_push(omap3_do_wfi, omap3_do_wfi_sz);
858 omap34xx_cpu_suspend_sz); 853
859 if (omap_type() != OMAP2_DEVICE_TYPE_GP) 854 if (omap_type() != OMAP2_DEVICE_TYPE_GP)
860 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context, 855 _omap_save_secure_sram = omap_sram_push(save_secure_ram_context,
861 save_secure_ram_context_sz); 856 save_secure_ram_context_sz);
@@ -920,7 +915,6 @@ static int __init omap3_pm_init(void)
920 per_clkdm = clkdm_lookup("per_clkdm"); 915 per_clkdm = clkdm_lookup("per_clkdm");
921 core_clkdm = clkdm_lookup("core_clkdm"); 916 core_clkdm = clkdm_lookup("core_clkdm");
922 917
923 omap_push_sram_idle();
924#ifdef CONFIG_SUSPEND 918#ifdef CONFIG_SUSPEND
925 suspend_set_ops(&omap_pm_ops); 919 suspend_set_ops(&omap_pm_ops);
926#endif /* CONFIG_SUSPEND */ 920#endif /* CONFIG_SUSPEND */
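The pm34xx.c conversion above moves OMAP3 onto the generic ARM suspend path: context is saved in C (omap34xx_save_context), and the final step goes through cpu_suspend(arg, fn), whose "finisher" takes an unsigned long and returns int, the same shape as the new exynos4_cpu_suspend() earlier in this series. A standalone sketch of that calling convention (cpu_suspend is stubbed; only the signatures and the dispatch mirror the hunk):

#include <stdio.h>

/*
 * Finisher with the new calling convention.  On real hardware it would
 * not return when power is actually cut; returning here models the
 * aborted-suspend path.
 */
static int omap34xx_do_sram_idle(unsigned long save_state)
{
	printf("WFI, save_state=%lu\n", save_state);
	return 0;
}

/* stand-in for the generic ARM cpu_suspend(): saves CPU context on the
 * stack, calls the finisher; the resume path restores the context */
static int cpu_suspend_stub(unsigned long arg, int (*fn)(unsigned long))
{
	return fn(arg);
}

int main(void)
{
	unsigned long save_state = 3;	/* L1, L2 and logic all lost */

	/* same dispatch as in omap_sram_idle() above */
	if (save_state == 1 || save_state == 3)
		cpu_suspend_stub(save_state, omap34xx_do_sram_idle);
	else
		omap34xx_do_sram_idle(save_state);
	return 0;
}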
diff --git a/arch/arm/mach-omap2/sleep34xx.S b/arch/arm/mach-omap2/sleep34xx.S
index 63f10669571a..f2ea1bd1c691 100644
--- a/arch/arm/mach-omap2/sleep34xx.S
+++ b/arch/arm/mach-omap2/sleep34xx.S
@@ -74,46 +74,6 @@
74 * API functions 74 * API functions
75 */ 75 */
76 76
77/*
78 * The "get_*restore_pointer" functions are used to provide a
79 * physical restore address where the ROM code jumps while waking
80 * up from MPU OFF/OSWR state.
81 * The restore pointer is stored into the scratchpad.
82 */
83
84 .text
85/* Function call to get the restore pointer for resume from OFF */
86ENTRY(get_restore_pointer)
87 stmfd sp!, {lr} @ save registers on stack
88 adr r0, restore
89 ldmfd sp!, {pc} @ restore regs and return
90ENDPROC(get_restore_pointer)
91 .align
92ENTRY(get_restore_pointer_sz)
93 .word . - get_restore_pointer
94
95 .text
96/* Function call to get the restore pointer for 3630 resume from OFF */
97ENTRY(get_omap3630_restore_pointer)
98 stmfd sp!, {lr} @ save registers on stack
99 adr r0, restore_3630
100 ldmfd sp!, {pc} @ restore regs and return
101ENDPROC(get_omap3630_restore_pointer)
102 .align
103ENTRY(get_omap3630_restore_pointer_sz)
104 .word . - get_omap3630_restore_pointer
105
106 .text
107/* Function call to get the restore pointer for ES3 to resume from OFF */
108ENTRY(get_es3_restore_pointer)
109 stmfd sp!, {lr} @ save registers on stack
110 adr r0, restore_es3
111 ldmfd sp!, {pc} @ restore regs and return
112ENDPROC(get_es3_restore_pointer)
113 .align
114ENTRY(get_es3_restore_pointer_sz)
115 .word . - get_es3_restore_pointer
116
117 .text 77 .text
118/* 78/*
119 * L2 cache needs to be toggled for stable OFF mode functionality on 3630. 79 * L2 cache needs to be toggled for stable OFF mode functionality on 3630.
@@ -133,7 +93,7 @@ ENDPROC(enable_omap3630_toggle_l2_on_restore)
133/* Function to call rom code to save secure ram context */ 93/* Function to call rom code to save secure ram context */
134 .align 3 94 .align 3
135ENTRY(save_secure_ram_context) 95ENTRY(save_secure_ram_context)
136 stmfd sp!, {r1-r12, lr} @ save registers on stack 96 stmfd sp!, {r4 - r11, lr} @ save registers on stack
137 adr r3, api_params @ r3 points to parameters 97 adr r3, api_params @ r3 points to parameters
138 str r0, [r3,#0x4] @ r0 has sdram address 98 str r0, [r3,#0x4] @ r0 has sdram address
139 ldr r12, high_mask 99 ldr r12, high_mask
@@ -152,7 +112,7 @@ ENTRY(save_secure_ram_context)
152 nop 112 nop
153 nop 113 nop
154 nop 114 nop
155 ldmfd sp!, {r1-r12, pc} 115 ldmfd sp!, {r4 - r11, pc}
156 .align 116 .align
157sram_phy_addr_mask: 117sram_phy_addr_mask:
158 .word SRAM_BASE_P 118 .word SRAM_BASE_P
@@ -179,69 +139,38 @@ ENTRY(save_secure_ram_context_sz)
179 * 139 *
180 * 140 *
181 * Notes: 141 * Notes:
182 * - this code gets copied to internal SRAM at boot and after wake-up 142 * - only the minimum set of functions gets copied to internal SRAM at boot
183 * from OFF mode. The execution pointer in SRAM is _omap_sram_idle. 143 * and after wake-up from OFF mode, cf. omap_push_sram_idle. The function
144 * pointers in SDRAM or SRAM are called depending on the desired low power
145 * target state.
184 * - when the OMAP wakes up it continues at different execution points 146 * - when the OMAP wakes up it continues at different execution points
185 * depending on the low power mode (non-OFF vs OFF modes), 147 * depending on the low power mode (non-OFF vs OFF modes),
186 * cf. 'Resume path for xxx mode' comments. 148 * cf. 'Resume path for xxx mode' comments.
187 */ 149 */
188 .align 3 150 .align 3
189ENTRY(omap34xx_cpu_suspend) 151ENTRY(omap34xx_cpu_suspend)
190 stmfd sp!, {r0-r12, lr} @ save registers on stack 152 stmfd sp!, {r4 - r11, lr} @ save registers on stack
191 153
192 /* 154 /*
193 * r0 contains CPU context save/restore pointer in sdram 155 * r0 contains information about saving context:
194 * r1 contains information about saving context:
195 * 0 - No context lost 156 * 0 - No context lost
196 * 1 - Only L1 and logic lost 157 * 1 - Only L1 and logic lost
197 * 2 - Only L2 lost (Even L1 is retained we clean it along with L2) 158 * 2 - Only L2 lost (Even L1 is retained we clean it along with L2)
198 * 3 - Both L1 and L2 lost and logic lost 159 * 3 - Both L1 and L2 lost and logic lost
199 */ 160 */
200 161
201 /* Directly jump to WFI is the context save is not required */ 162 /*
202 cmp r1, #0x0 163 * For OFF mode: save context and jump to WFI in SDRAM (omap3_do_wfi)
203 beq omap3_do_wfi 164 * For non-OFF modes: jump to the WFI code in SRAM (omap3_do_wfi_sram)
165 */
166 ldr r4, omap3_do_wfi_sram_addr
167 ldr r5, [r4]
168 cmp r0, #0x0 @ If no context save required,
169 bxeq r5 @ jump to the WFI code in SRAM
170
204 171
205 /* Otherwise fall through to the save context code */ 172 /* Otherwise fall through to the save context code */
206save_context_wfi: 173save_context_wfi:
207 mov r8, r0 @ Store SDRAM address in r8
208 mrc p15, 0, r5, c1, c0, 1 @ Read Auxiliary Control Register
209 mov r4, #0x1 @ Number of parameters for restore call
210 stmia r8!, {r4-r5} @ Push parameters for restore call
211 mrc p15, 1, r5, c9, c0, 2 @ Read L2 AUX ctrl register
212 stmia r8!, {r4-r5} @ Push parameters for restore call
213
214 /* Check what that target sleep state is from r1 */
215 cmp r1, #0x2 @ Only L2 lost, no need to save context
216 beq clean_caches
217
218l1_logic_lost:
219 mov r4, sp @ Store sp
220 mrs r5, spsr @ Store spsr
221 mov r6, lr @ Store lr
222 stmia r8!, {r4-r6}
223
224 mrc p15, 0, r4, c1, c0, 2 @ Coprocessor access control register
225 mrc p15, 0, r5, c2, c0, 0 @ TTBR0
226 mrc p15, 0, r6, c2, c0, 1 @ TTBR1
227 mrc p15, 0, r7, c2, c0, 2 @ TTBCR
228 stmia r8!, {r4-r7}
229
230 mrc p15, 0, r4, c3, c0, 0 @ Domain access Control Register
231 mrc p15, 0, r5, c10, c2, 0 @ PRRR
232 mrc p15, 0, r6, c10, c2, 1 @ NMRR
233 stmia r8!,{r4-r6}
234
235 mrc p15, 0, r4, c13, c0, 1 @ Context ID
236 mrc p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
237 mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
238 mrs r7, cpsr @ Store current cpsr
239 stmia r8!, {r4-r7}
240
241 mrc p15, 0, r4, c1, c0, 0 @ save control register
242 stmia r8!, {r4}
243
244clean_caches:
245 /* 174 /*
246 * jump out to kernel flush routine 175 * jump out to kernel flush routine
247 * - reuse that code is better 176 * - reuse that code is better
@@ -284,7 +213,32 @@ clean_caches:
284 THUMB( nop ) 213 THUMB( nop )
285 .arm 214 .arm
286 215
287omap3_do_wfi: 216 b omap3_do_wfi
217
218/*
219 * Local variables
220 */
221omap3_do_wfi_sram_addr:
222 .word omap3_do_wfi_sram
223kernel_flush:
224 .word v7_flush_dcache_all
225
226/* ===================================
227 * == WFI instruction => Enter idle ==
228 * ===================================
229 */
230
231/*
232 * Do WFI instruction
233 * Includes the resume path for non-OFF modes
234 *
235 * This code gets copied to internal SRAM and is accessible
236 * from both SDRAM and SRAM:
237 * - executed from SRAM for non-off modes (omap3_do_wfi_sram),
238 * - executed from SDRAM for OFF mode (omap3_do_wfi).
239 */
240 .align 3
241ENTRY(omap3_do_wfi)
288 ldr r4, sdrc_power @ read the SDRC_POWER register 242 ldr r4, sdrc_power @ read the SDRC_POWER register
289 ldr r5, [r4] @ read the contents of SDRC_POWER 243 ldr r5, [r4] @ read the contents of SDRC_POWER
290 orr r5, r5, #0x40 @ enable self refresh on idle req 244 orr r5, r5, #0x40 @ enable self refresh on idle req
@@ -316,8 +270,86 @@ omap3_do_wfi:
316 nop 270 nop
317 nop 271 nop
318 nop 272 nop
319 bl wait_sdrc_ok
320 273
274/*
275 * This function implements the erratum ID i581 WA:
276 * SDRC state restore before accessing the SDRAM
277 *
278 * Only used at return from non-OFF mode. For OFF
279 * mode the ROM code configures the SDRC and
280 * the DPLL before calling the restore code directly
281 * from DDR.
282 */
283
284/* Make sure SDRC accesses are ok */
285wait_sdrc_ok:
286
287/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
288 ldr r4, cm_idlest_ckgen
289wait_dpll3_lock:
290 ldr r5, [r4]
291 tst r5, #1
292 beq wait_dpll3_lock
293
294 ldr r4, cm_idlest1_core
295wait_sdrc_ready:
296 ldr r5, [r4]
297 tst r5, #0x2
298 bne wait_sdrc_ready
299 /* allow DLL powerdown upon hw idle req */
300 ldr r4, sdrc_power
301 ldr r5, [r4]
302 bic r5, r5, #0x40
303 str r5, [r4]
304
305/*
 306 * PC-relative stores lead to undefined behaviour in Thumb-2: use r7 as a
 307 * base instead.
 308 * Be careful not to clobber r7 when maintaining this code.
309 */
310
311is_dll_in_lock_mode:
312 /* Is dll in lock mode? */
313 ldr r4, sdrc_dlla_ctrl
314 ldr r5, [r4]
315 tst r5, #0x4
316 bne exit_nonoff_modes @ Return if locked
317 /* wait till dll locks */
318 adr r7, kick_counter
319wait_dll_lock_timed:
320 ldr r4, wait_dll_lock_counter
321 add r4, r4, #1
322 str r4, [r7, #wait_dll_lock_counter - kick_counter]
323 ldr r4, sdrc_dlla_status
324 /* Wait 20uS for lock */
325 mov r6, #8
326wait_dll_lock:
327 subs r6, r6, #0x1
328 beq kick_dll
329 ldr r5, [r4]
330 and r5, r5, #0x4
331 cmp r5, #0x4
332 bne wait_dll_lock
333 b exit_nonoff_modes @ Return when locked
334
335 /* disable/reenable DLL if not locked */
336kick_dll:
337 ldr r4, sdrc_dlla_ctrl
338 ldr r5, [r4]
339 mov r6, r5
340 bic r6, #(1<<3) @ disable dll
341 str r6, [r4]
342 dsb
343 orr r6, r6, #(1<<3) @ enable dll
344 str r6, [r4]
345 dsb
346 ldr r4, kick_counter
347 add r4, r4, #1
348 str r4, [r7] @ kick_counter
349 b wait_dll_lock_timed
350
351exit_nonoff_modes:
352 /* Re-enable C-bit if needed */
321 mrc p15, 0, r0, c1, c0, 0 353 mrc p15, 0, r0, c1, c0, 0
322 tst r0, #(1 << 2) @ Check C bit enabled? 354 tst r0, #(1 << 2) @ Check C bit enabled?
323 orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared 355 orreq r0, r0, #(1 << 2) @ Enable the C bit if cleared
@@ -329,7 +361,32 @@ omap3_do_wfi:
329 * == Exit point from non-OFF modes == 361 * == Exit point from non-OFF modes ==
330 * =================================== 362 * ===================================
331 */ 363 */
332 ldmfd sp!, {r0-r12, pc} @ restore regs and return 364 ldmfd sp!, {r4 - r11, pc} @ restore regs and return
365
366/*
367 * Local variables
368 */
369sdrc_power:
370 .word SDRC_POWER_V
371cm_idlest1_core:
372 .word CM_IDLEST1_CORE_V
373cm_idlest_ckgen:
374 .word CM_IDLEST_CKGEN_V
375sdrc_dlla_status:
376 .word SDRC_DLLA_STATUS_V
377sdrc_dlla_ctrl:
378 .word SDRC_DLLA_CTRL_V
379 /*
380 * When exporting to userspace while the counters are in SRAM,
 381 * these 2 words need to be at the end to facilitate retrieval!
382 */
383kick_counter:
384 .word 0
385wait_dll_lock_counter:
386 .word 0
387
388ENTRY(omap3_do_wfi_sz)
389 .word . - omap3_do_wfi
333 390
334 391
335/* 392/*
@@ -346,13 +403,17 @@ omap3_do_wfi:
346 * restore_es3: applies to 34xx >= ES3.0 403 * restore_es3: applies to 34xx >= ES3.0
347 * restore_3630: applies to 36xx 404 * restore_3630: applies to 36xx
348 * restore: common code for 3xxx 405 * restore: common code for 3xxx
406 *
 407 * Note: when returning from CORE and MPU OFF mode we are running
 408 * from SDRAM, with the MMU, caches and branch prediction disabled.
 409 * The SRAM contents have also been cleared.
349 */ 410 */
350restore_es3: 411ENTRY(omap3_restore_es3)
351 ldr r5, pm_prepwstst_core_p 412 ldr r5, pm_prepwstst_core_p
352 ldr r4, [r5] 413 ldr r4, [r5]
353 and r4, r4, #0x3 414 and r4, r4, #0x3
354 cmp r4, #0x0 @ Check if previous power state of CORE is OFF 415 cmp r4, #0x0 @ Check if previous power state of CORE is OFF
355 bne restore 416 bne omap3_restore @ Fall through to OMAP3 common code
356 adr r0, es3_sdrc_fix 417 adr r0, es3_sdrc_fix
357 ldr r1, sram_base 418 ldr r1, sram_base
358 ldr r2, es3_sdrc_fix_sz 419 ldr r2, es3_sdrc_fix_sz
@@ -364,35 +425,32 @@ copy_to_sram:
  bne copy_to_sram
  ldr r1, sram_base
  blx r1
- b restore
+ b omap3_restore @ Fall through to OMAP3 common code
+ENDPROC(omap3_restore_es3)
 
-restore_3630:
+ENTRY(omap3_restore_3630)
  ldr r1, pm_prepwstst_core_p
  ldr r2, [r1]
  and r2, r2, #0x3
  cmp r2, #0x0 @ Check if previous power state of CORE is OFF
- bne restore
+ bne omap3_restore @ Fall through to OMAP3 common code
  /* Disable RTA before giving control */
  ldr r1, control_mem_rta
  mov r2, #OMAP36XX_RTA_DISABLE
  str r2, [r1]
+ENDPROC(omap3_restore_3630)
 
  /* Fall through to common code for the remaining logic */
 
-restore:
+ENTRY(omap3_restore)
  /*
- * Check what was the reason for mpu reset and store the reason in r9:
- * 0 - No context lost
- * 1 - Only L1 and logic lost
- * 2 - Only L2 lost - In this case, we wont be here
- * 3 - Both L1 and L2 lost
+ * Read the pwstctrl register to check the reason for mpu reset.
+ * This tells us what was lost.
  */
  ldr r1, pm_pwstctrl_mpu
  ldr r2, [r1]
  and r2, r2, #0x3
  cmp r2, #0x0 @ Check if target power state was OFF or RET
- moveq r9, #0x3 @ MPU OFF => L1 and L2 lost
- movne r9, #0x1 @ Only L1 and L2 lost => avoid L2 invalidation
  bne logic_l1_restore
 
  ldr r0, l2dis_3630
@@ -471,115 +529,39 @@ logic_l1_restore:
  orr r1, r1, #2 @ re-enable L2 cache
  mcr p15, 0, r1, c1, c0, 1
 skipl2reen:
- mov r1, #0
- /*
- * Invalidate all instruction caches to PoU
- * and flush branch target cache
- */
- mcr p15, 0, r1, c7, c5, 0
 
- ldr r4, scratchpad_base
- ldr r3, [r4,#0xBC]
- adds r3, r3, #16
+ /* Now branch to the common CPU resume function */
+ b cpu_resume
+ENDPROC(omap3_restore)
 
- ldmia r3!, {r4-r6}
+ .ltorg
- mov sp, r4 @ Restore sp
- msr spsr_cxsf, r5 @ Restore spsr
- mov lr, r6 @ Restore lr
-
- ldmia r3!, {r4-r7}
- mcr p15, 0, r4, c1, c0, 2 @ Coprocessor access Control Register
- mcr p15, 0, r5, c2, c0, 0 @ TTBR0
- mcr p15, 0, r6, c2, c0, 1 @ TTBR1
- mcr p15, 0, r7, c2, c0, 2 @ TTBCR
-
- ldmia r3!,{r4-r6}
- mcr p15, 0, r4, c3, c0, 0 @ Domain access Control Register
- mcr p15, 0, r5, c10, c2, 0 @ PRRR
- mcr p15, 0, r6, c10, c2, 1 @ NMRR
-
-
- ldmia r3!,{r4-r7}
- mcr p15, 0, r4, c13, c0, 1 @ Context ID
- mcr p15, 0, r5, c13, c0, 2 @ User r/w thread and process ID
- mrc p15, 0, r6, c12, c0, 0 @ Secure or NS vector base address
- msr cpsr, r7 @ store cpsr
-
- /* Enabling MMU here */
- mrc p15, 0, r7, c2, c0, 2 @ Read TTBRControl
- /* Extract N (0:2) bits and decide whether to use TTBR0 or TTBR1 */
- and r7, #0x7
- cmp r7, #0x0
- beq usettbr0
-ttbr_error:
- /*
- * More work needs to be done to support N[0:2] value other than 0
- * So looping here so that the error can be detected
- */
- b ttbr_error
-usettbr0:
- mrc p15, 0, r2, c2, c0, 0
- ldr r5, ttbrbit_mask
- and r2, r5
- mov r4, pc
- ldr r5, table_index_mask
- and r4, r5 @ r4 = 31 to 20 bits of pc
- /* Extract the value to be written to table entry */
- ldr r1, table_entry
- /* r1 has the value to be written to table entry*/
- add r1, r1, r4
- /* Getting the address of table entry to modify */
- lsr r4, #18
- /* r2 has the location which needs to be modified */
- add r2, r4
- /* Storing previous entry of location being modified */
- ldr r5, scratchpad_base
- ldr r4, [r2]
- str r4, [r5, #0xC0]
- /* Modify the table entry */
- str r1, [r2]
- /*
- * Storing address of entry being modified
- * - will be restored after enabling MMU
- */
- ldr r5, scratchpad_base
- str r2, [r5, #0xC4]
-
- mov r0, #0
- mcr p15, 0, r0, c7, c5, 4 @ Flush prefetch buffer
- mcr p15, 0, r0, c7, c5, 6 @ Invalidate branch predictor array
- mcr p15, 0, r0, c8, c5, 0 @ Invalidate instruction TLB
- mcr p15, 0, r0, c8, c6, 0 @ Invalidate data TLB
- /*
- * Restore control register. This enables the MMU.
- * The caches and prediction are not enabled here, they
- * will be enabled after restoring the MMU table entry.
- */
- ldmia r3!, {r4}
- /* Store previous value of control register in scratchpad */
- str r4, [r5, #0xC8]
- ldr r2, cache_pred_disable_mask
- and r4, r2
- mcr p15, 0, r4, c1, c0, 0
- dsb
- isb
- ldr r0, =restoremmu_on
- bx r0
 
 /*
- * ==============================
- * == Exit point from OFF mode ==
- * ==============================
+ * Local variables
  */
-restoremmu_on:
- ldmfd sp!, {r0-r12, pc} @ restore regs and return
-
+pm_prepwstst_core_p:
+ .word PM_PREPWSTST_CORE_P
+pm_pwstctrl_mpu:
+ .word PM_PWSTCTRL_MPU_P
+scratchpad_base:
+ .word SCRATCHPAD_BASE_P
+sram_base:
+ .word SRAM_BASE_P + 0x8000
+control_stat:
+ .word CONTROL_STAT
+control_mem_rta:
+ .word CONTROL_MEM_RTA_CTRL
+l2dis_3630:
+ .word 0
 
 /*
  * Internal functions
  */
 
-/* This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0 */
+/*
+ * This function implements the erratum ID i443 WA, applies to 34xx >= ES3.0
+ * Copied to and run from SRAM in order to reconfigure the SDRC parameters.
+ */
  .text
  .align 3
 ENTRY(es3_sdrc_fix)
@@ -609,6 +591,9 @@ ENTRY(es3_sdrc_fix)
  str r5, [r4] @ kick off refreshes
  bx lr
 
+/*
+ * Local variables
+ */
  .align
 sdrc_syscfg:
  .word SDRC_SYSCONFIG_P
@@ -627,128 +612,3 @@ sdrc_manual_1:
 ENDPROC(es3_sdrc_fix)
 ENTRY(es3_sdrc_fix_sz)
  .word . - es3_sdrc_fix
-
-/*
- * This function implements the erratum ID i581 WA:
- * SDRC state restore before accessing the SDRAM
- *
- * Only used at return from non-OFF mode. For OFF
- * mode the ROM code configures the SDRC and
- * the DPLL before calling the restore code directly
- * from DDR.
- */
-
-/* Make sure SDRC accesses are ok */
-wait_sdrc_ok:
-
-/* DPLL3 must be locked before accessing the SDRC. Maybe the HW ensures this */
- ldr r4, cm_idlest_ckgen
-wait_dpll3_lock:
- ldr r5, [r4]
- tst r5, #1
- beq wait_dpll3_lock
-
- ldr r4, cm_idlest1_core
-wait_sdrc_ready:
- ldr r5, [r4]
- tst r5, #0x2
- bne wait_sdrc_ready
- /* allow DLL powerdown upon hw idle req */
- ldr r4, sdrc_power
- ldr r5, [r4]
- bic r5, r5, #0x40
- str r5, [r4]
-
-/*
- * PC-relative stores lead to undefined behaviour in Thumb-2: use a r7 as a
- * base instead.
- * Be careful not to clobber r7 when maintaing this code.
- */
-
-is_dll_in_lock_mode:
- /* Is dll in lock mode? */
- ldr r4, sdrc_dlla_ctrl
- ldr r5, [r4]
- tst r5, #0x4
- bxne lr @ Return if locked
- /* wait till dll locks */
- adr r7, kick_counter
-wait_dll_lock_timed:
- ldr r4, wait_dll_lock_counter
- add r4, r4, #1
- str r4, [r7, #wait_dll_lock_counter - kick_counter]
- ldr r4, sdrc_dlla_status
- /* Wait 20uS for lock */
- mov r6, #8
-wait_dll_lock:
- subs r6, r6, #0x1
- beq kick_dll
- ldr r5, [r4]
- and r5, r5, #0x4
- cmp r5, #0x4
- bne wait_dll_lock
- bx lr @ Return when locked
-
- /* disable/reenable DLL if not locked */
-kick_dll:
- ldr r4, sdrc_dlla_ctrl
- ldr r5, [r4]
- mov r6, r5
- bic r6, #(1<<3) @ disable dll
- str r6, [r4]
- dsb
- orr r6, r6, #(1<<3) @ enable dll
- str r6, [r4]
- dsb
- ldr r4, kick_counter
- add r4, r4, #1
- str r4, [r7] @ kick_counter
- b wait_dll_lock_timed
-
- .align
-cm_idlest1_core:
- .word CM_IDLEST1_CORE_V
-cm_idlest_ckgen:
- .word CM_IDLEST_CKGEN_V
-sdrc_dlla_status:
- .word SDRC_DLLA_STATUS_V
-sdrc_dlla_ctrl:
- .word SDRC_DLLA_CTRL_V
-pm_prepwstst_core_p:
- .word PM_PREPWSTST_CORE_P
-pm_pwstctrl_mpu:
- .word PM_PWSTCTRL_MPU_P
-scratchpad_base:
- .word SCRATCHPAD_BASE_P
-sram_base:
- .word SRAM_BASE_P + 0x8000
-sdrc_power:
- .word SDRC_POWER_V
-ttbrbit_mask:
- .word 0xFFFFC000
-table_index_mask:
- .word 0xFFF00000
-table_entry:
- .word 0x00000C02
-cache_pred_disable_mask:
- .word 0xFFFFE7FB
-control_stat:
- .word CONTROL_STAT
-control_mem_rta:
- .word CONTROL_MEM_RTA_CTRL
-kernel_flush:
- .word v7_flush_dcache_all
-l2dis_3630:
- .word 0
- /*
- * When exporting to userspace while the counters are in SRAM,
- * these 2 words need to be at the end to facilitate retrival!
- */
-kick_counter:
- .word 0
-wait_dll_lock_counter:
- .word 0
-ENDPROC(omap34xx_cpu_suspend)
-
-ENTRY(omap34xx_cpu_suspend_sz)
- .word . - omap34xx_cpu_suspend
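The wait_sdrc_ok/kick_dll logic deleted here is not lost: this patch keeps it in the relocated omap3_do_wfi section above. The re-lock algorithm is easier to see in C; a hedged sketch, with the bit positions taken from the assembly and the register pointers assumed to be already mapped:

    #include <stdint.h>

    #define DLL_ENABLE (1u << 3)  /* SDRC_DLLA_CTRL: DLL enable bit */
    #define DLL_LOCKED (1u << 2)  /* SDRC_DLLA_STATUS: lock flag */

    static volatile uint32_t *dlla_ctrl;    /* assumed ioremapped */
    static volatile uint32_t *dlla_status;  /* assumed ioremapped */
    static uint32_t kick_counter, wait_dll_lock_counter;

    static void wait_dll_lock_timed(void)
    {
        for (;;) {
            wait_dll_lock_counter++;
            for (int i = 0; i < 8; i++)     /* ~20us lock budget */
                if (*dlla_status & DLL_LOCKED)
                    return;
            /* not locked: pulse the DLL enable bit and retry */
            *dlla_ctrl &= ~DLL_ENABLE;
            *dlla_ctrl |= DLL_ENABLE;
            kick_counter++;
        }
    }

The two counters mirror kick_counter/wait_dll_lock_counter above, which are deliberately kept last so they can still be retrieved while the code sits in SRAM.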
diff --git a/arch/arm/mach-pnx4008/include/mach/entry-macro.S b/arch/arm/mach-pnx4008/include/mach/entry-macro.S
index 8003037578ed..db7eeebf30d7 100644
--- a/arch/arm/mach-pnx4008/include/mach/entry-macro.S
+++ b/arch/arm/mach-pnx4008/include/mach/entry-macro.S
@@ -120,8 +120,3 @@
 1003:
  .endm
 
-
- .macro irq_prio_table
- .endm
-
-
diff --git a/arch/arm/mach-pxa/include/mach/pm.h b/arch/arm/mach-pxa/include/mach/pm.h
index f15afe012995..51558bcee999 100644
--- a/arch/arm/mach-pxa/include/mach/pm.h
+++ b/arch/arm/mach-pxa/include/mach/pm.h
@@ -22,8 +22,8 @@ struct pxa_cpu_pm_fns {
 extern struct pxa_cpu_pm_fns *pxa_cpu_pm_fns;
 
 /* sleep.S */
-extern void pxa25x_cpu_suspend(unsigned int, long);
-extern void pxa27x_cpu_suspend(unsigned int, long);
+extern int pxa25x_finish_suspend(unsigned long);
+extern int pxa27x_finish_suspend(unsigned long);
 
 extern int pxa_pm_enter(suspend_state_t state);
 extern int pxa_pm_prepare(void);
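Both prototypes change because saving and restoring the CPU context now happens in common ARM code: a platform passes cpu_suspend() an argument and a "finisher" callback that runs once the context is saved. A minimal sketch of the pattern, assuming a hypothetical my_pm_enter() hook and MY_PWRMODE value:

    #include <asm/suspend.h>        /* cpu_suspend() */

    /* Called with the context already saved; must enter sleep. */
    static int my_finish_suspend(unsigned long pwrmode)
    {
        /* program the SoC sleep mode from 'pwrmode', then execute the
         * sleep instruction; on success this never returns -- wakeup
         * re-enters the kernel through cpu_resume instead. */
        return 1;   /* reached only if the sleep entry was aborted */
    }

    static void my_pm_enter(void)
    {
        cpu_suspend(MY_PWRMODE, my_finish_suspend);
    }

This is also why the v:p offset parameter disappears from every caller below: the generic code computes it itself.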
diff --git a/arch/arm/mach-pxa/mfp-pxa2xx.c b/arch/arm/mach-pxa/mfp-pxa2xx.c
index 87ae3129f4f7..b27544bcafcb 100644
--- a/arch/arm/mach-pxa/mfp-pxa2xx.c
+++ b/arch/arm/mach-pxa/mfp-pxa2xx.c
@@ -347,9 +347,9 @@ static int pxa2xx_mfp_suspend(void)
  if ((gpio_desc[i].config & MFP_LPM_KEEP_OUTPUT) &&
  (GPDR(i) & GPIO_bit(i))) {
  if (GPLR(i) & GPIO_bit(i))
- PGSR(i) |= GPIO_bit(i);
+ PGSR(gpio_to_bank(i)) |= GPIO_bit(i);
  else
- PGSR(i) &= ~GPIO_bit(i);
+ PGSR(gpio_to_bank(i)) &= ~GPIO_bit(i);
  }
  }
 
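The fix indexes the PGSR sleep-state registers by bank: on PXA2xx one register covers 32 GPIO lines, so the register selector and the bit mask come from different parts of the GPIO number. A standalone illustration of the arithmetic (macro bodies assumed from the 32-line bank layout):

    #include <stdio.h>

    #define GPIO_bit(x)     (1u << ((x) & 0x1f))  /* bit within its bank */
    #define gpio_to_bank(x) ((x) >> 5)            /* 32 GPIOs per bank */

    int main(void)
    {
        unsigned gpio = 85; /* arbitrary example line */
        printf("gpio %u -> PGSR bank %u, bit mask 0x%08x\n",
               gpio, gpio_to_bank(gpio), GPIO_bit(gpio));
        return 0; /* gpio 85 -> PGSR bank 2, bit mask 0x00200000 */
    }

Passing the raw GPIO number to PGSR(), as the old code did, selected the wrong register for any line above 3.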
diff --git a/arch/arm/mach-pxa/palmz72.c b/arch/arm/mach-pxa/palmz72.c
index 65f24f0b77e8..5a5329bc33f1 100644
--- a/arch/arm/mach-pxa/palmz72.c
+++ b/arch/arm/mach-pxa/palmz72.c
@@ -33,6 +33,7 @@
 #include <linux/i2c-gpio.h>
 
 #include <asm/mach-types.h>
+#include <asm/suspend.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
index 51e1583265b2..37178a8559b1 100644
--- a/arch/arm/mach-pxa/pm.c
+++ b/arch/arm/mach-pxa/pm.c
@@ -42,7 +42,6 @@ int pxa_pm_enter(suspend_state_t state)
 
  /* *** go zzz *** */
  pxa_cpu_pm_fns->enter(state);
- cpu_init();
 
  if (state != PM_SUSPEND_STANDBY && pxa_cpu_pm_fns->restore) {
  /* after sleeping, validate the checksum */
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c
index fed363cec9c6..9c434d21a271 100644
--- a/arch/arm/mach-pxa/pxa25x.c
+++ b/arch/arm/mach-pxa/pxa25x.c
@@ -25,6 +25,7 @@
 #include <linux/irq.h>
 
 #include <asm/mach/map.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
@@ -244,7 +245,7 @@ static void pxa25x_cpu_pm_enter(suspend_state_t state)
 
  switch (state) {
  case PM_SUSPEND_MEM:
- pxa25x_cpu_suspend(PWRMODE_SLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(PWRMODE_SLEEP, pxa25x_finish_suspend);
  break;
  }
 }
diff --git a/arch/arm/mach-pxa/pxa27x.c b/arch/arm/mach-pxa/pxa27x.c
index 2fecbec58d88..9d2400b5f503 100644
--- a/arch/arm/mach-pxa/pxa27x.c
+++ b/arch/arm/mach-pxa/pxa27x.c
@@ -24,6 +24,7 @@
 #include <asm/mach/map.h>
 #include <mach/hardware.h>
 #include <asm/irq.h>
+#include <asm/suspend.h>
 #include <mach/irqs.h>
 #include <mach/gpio.h>
 #include <mach/pxa27x.h>
@@ -284,6 +285,11 @@ void pxa27x_cpu_pm_restore(unsigned long *sleep_save)
 void pxa27x_cpu_pm_enter(suspend_state_t state)
 {
  extern void pxa_cpu_standby(void);
+#ifndef CONFIG_IWMMXT
+ u64 acc0;
+
+ asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
 
  /* ensure voltage-change sequencer not initiated, which hangs */
  PCFR &= ~PCFR_FVC;
@@ -299,7 +305,10 @@ void pxa27x_cpu_pm_enter(suspend_state_t state)
  pxa_cpu_standby();
  break;
  case PM_SUSPEND_MEM:
- pxa27x_cpu_suspend(pwrmode, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(pwrmode, pxa27x_finish_suspend);
+#ifndef CONFIG_IWMMXT
+ asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0));
+#endif
  break;
  }
 }
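The mra/mar pair is the one piece of CPU state the generic suspend path does not cover on these SoCs: the 40-bit XScale DSP accumulator acc0. The shape of the pattern, as a hedged sketch (my_finish_suspend as in the note above; the restore here uses a plain input constraint):

    static void pm_enter_preserving_acc0(void)
    {
        u64 acc0;

        asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));  /* save */
        cpu_suspend(0, my_finish_suspend);                 /* sleep */
        asm volatile("mar acc0, %Q0, %R0" : : "r" (acc0)); /* restore */
    }

%Q0/%R0 select the low and high 32-bit halves of the 64-bit variable.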
diff --git a/arch/arm/mach-pxa/pxa3xx.c b/arch/arm/mach-pxa/pxa3xx.c
index 8521d7d6f1da..ef1c56a67afc 100644
--- a/arch/arm/mach-pxa/pxa3xx.c
+++ b/arch/arm/mach-pxa/pxa3xx.c
@@ -24,6 +24,7 @@
 #include <linux/i2c/pxa-i2c.h>
 
 #include <asm/mach/map.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/gpio.h>
 #include <mach/pxa3xx-regs.h>
@@ -141,8 +142,13 @@ static void pxa3xx_cpu_pm_suspend(void)
 {
  volatile unsigned long *p = (volatile void *)0xc0000000;
  unsigned long saved_data = *p;
+#ifndef CONFIG_IWMMXT
+ u64 acc0;
 
- extern void pxa3xx_cpu_suspend(long);
+ asm volatile("mra %Q0, %R0, acc0" : "=r" (acc0));
+#endif
+
+ extern int pxa3xx_finish_suspend(unsigned long);
 
  /* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
  CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
@@ -162,11 +168,15 @@ static void pxa3xx_cpu_pm_suspend(void)
  /* overwrite with the resume address */
  *p = virt_to_phys(cpu_resume);
 
- pxa3xx_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(0, pxa3xx_finish_suspend);
 
  *p = saved_data;
 
  AD3ER = 0;
+
+#ifndef CONFIG_IWMMXT
+ asm volatile("mar acc0, %Q0, %R0" : "=r" (acc0));
+#endif
 }
 
 static void pxa3xx_cpu_pm_enter(suspend_state_t state)
diff --git a/arch/arm/mach-pxa/raumfeld.c b/arch/arm/mach-pxa/raumfeld.c
index d130f77b6d11..2f37d43f51b6 100644
--- a/arch/arm/mach-pxa/raumfeld.c
+++ b/arch/arm/mach-pxa/raumfeld.c
@@ -573,10 +573,10 @@ static struct pxafb_mode_info sharp_lq043t3dx02_mode = {
  .xres = 480,
  .yres = 272,
  .bpp = 16,
- .hsync_len = 4,
+ .hsync_len = 41,
  .left_margin = 2,
  .right_margin = 1,
- .vsync_len = 1,
+ .vsync_len = 10,
  .upper_margin = 3,
  .lower_margin = 1,
  .sync = 0,
@@ -596,29 +596,31 @@ static void __init raumfeld_lcd_init(void)
 {
  int ret;
 
- pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
-
- /* Earlier devices had the backlight regulator controlled
- * via PWM, later versions use another controller for that */
- if ((system_rev & 0xff) < 2) {
- mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
- pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
- platform_device_register(&raumfeld_pwm_backlight_device);
- } else
- platform_device_register(&raumfeld_lt3593_device);
-
  ret = gpio_request(GPIO_TFT_VA_EN, "display VA enable");
  if (ret < 0)
  pr_warning("Unable to request GPIO_TFT_VA_EN\n");
  else
  gpio_direction_output(GPIO_TFT_VA_EN, 1);
 
+ msleep(100);
+
  ret = gpio_request(GPIO_DISPLAY_ENABLE, "display enable");
  if (ret < 0)
  pr_warning("Unable to request GPIO_DISPLAY_ENABLE\n");
  else
  gpio_direction_output(GPIO_DISPLAY_ENABLE, 1);
 
+ /* Hardware revision 2 has the backlight regulator controlled
+ * by an LT3593, earlier and later devices use PWM for that. */
+ if ((system_rev & 0xff) == 2) {
+ platform_device_register(&raumfeld_lt3593_device);
+ } else {
+ mfp_cfg_t raumfeld_pwm_pin_config = GPIO17_PWM0_OUT;
+ pxa3xx_mfp_config(&raumfeld_pwm_pin_config, 1);
+ platform_device_register(&raumfeld_pwm_backlight_device);
+ }
+
+ pxa_set_fb_info(NULL, &raumfeld_sharp_lcd_info);
  platform_device_register(&pxa3xx_device_gcu);
 }
 
@@ -657,10 +659,10 @@ static struct lis3lv02d_platform_data lis3_pdata = {
 
 #define SPI_AK4104 \
 { \
- .modalias = "ak4104", \
+ .modalias = "ak4104-codec", \
  .max_speed_hz = 10000, \
  .bus_num = 0, \
  .chip_select = 0, \
  .controller_data = (void *) GPIO_SPDIF_CS, \
 }
 
diff --git a/arch/arm/mach-pxa/sleep.S b/arch/arm/mach-pxa/sleep.S
index 6f5368899d84..1e544be9905d 100644
--- a/arch/arm/mach-pxa/sleep.S
+++ b/arch/arm/mach-pxa/sleep.S
@@ -24,20 +24,9 @@
 
 #ifdef CONFIG_PXA3xx
 /*
- * pxa3xx_cpu_suspend() - forces CPU into sleep state (S2D3C4)
- *
- * r0 = v:p offset
+ * pxa3xx_finish_suspend() - forces CPU into sleep state (S2D3C4)
  */
-ENTRY(pxa3xx_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
- mra r2, r3, acc0
-#endif
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r1, r0
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
-
+ENTRY(pxa3xx_finish_suspend)
  mov r0, #0x06 @ S2D3C4 mode
  mcr p14, 0, r0, c7, c0, 0 @ enter sleep
 
@@ -46,28 +35,18 @@ ENTRY(pxa3xx_cpu_suspend)
 
 #ifdef CONFIG_PXA27x
 /*
- * pxa27x_cpu_suspend()
+ * pxa27x_finish_suspend()
  *
  * Forces CPU into sleep state.
  *
  * r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
  */
-ENTRY(pxa27x_cpu_suspend)
-
-#ifndef CONFIG_IWMMXT
- mra r2, r3, acc0
-#endif
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r4, r0 @ save sleep mode
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
-
+ENTRY(pxa27x_finish_suspend)
  @ Put the processor to sleep
  @ (also workaround for sighting 28071)
 
  @ prepare value for sleep mode
- mov r1, r4 @ sleep mode
+ mov r1, r0 @ sleep mode
 
  @ prepare pointer to physical address 0 (virtual mapping in generic.c)
  mov r2, #UNCACHED_PHYS_0
@@ -99,21 +78,16 @@ ENTRY(pxa27x_cpu_suspend)
 
 #ifdef CONFIG_PXA25x
 /*
- * pxa25x_cpu_suspend()
+ * pxa25x_finish_suspend()
  *
  * Forces CPU into sleep state.
  *
  * r0 = value for PWRMODE M field for desired sleep state
- * r1 = v:p offset
  */
 
-ENTRY(pxa25x_cpu_suspend)
- stmfd sp!, {r2 - r12, lr} @ save registers on stack
- mov r4, r0 @ save sleep mode
- ldr r3, =pxa_cpu_resume @ resume function
- bl cpu_suspend
+ENTRY(pxa25x_finish_suspend)
  @ prepare value for sleep mode
- mov r1, r4 @ sleep mode
+ mov r1, r0 @ sleep mode
 
  @ prepare pointer to physical address 0 (virtual mapping in generic.c)
  mov r2, #UNCACHED_PHYS_0
@@ -195,16 +169,3 @@ pxa_cpu_do_suspend:
  mcr p14, 0, r1, c7, c0, 0 @ PWRMODE
 
 20: b 20b @ loop waiting for sleep
-
-/*
- * pxa_cpu_resume()
- *
- * entry point from bootloader into kernel during resume
- */
- .align 5
-pxa_cpu_resume:
- ldmfd sp!, {r2, r3}
-#ifndef CONFIG_IWMMXT
- mar acc0, r2, r3
-#endif
- ldmfd sp!, {r4 - r12, pc} @ return to caller
diff --git a/arch/arm/mach-pxa/zeus.c b/arch/arm/mach-pxa/zeus.c
index 00363c7ac182..9b99cc164de5 100644
--- a/arch/arm/mach-pxa/zeus.c
+++ b/arch/arm/mach-pxa/zeus.c
@@ -31,6 +31,7 @@
 #include <linux/can/platform/mcp251x.h>
 
 #include <asm/mach-types.h>
+#include <asm/suspend.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
@@ -676,7 +677,7 @@ static struct pxa2xx_udc_mach_info zeus_udc_info = {
 static void zeus_power_off(void)
 {
  local_irq_disable();
- pxa27x_cpu_suspend(PWRMODE_DEEPSLEEP, PLAT_PHYS_OFFSET - PAGE_OFFSET);
+ cpu_suspend(PWRMODE_DEEPSLEEP, pxa27x_finish_suspend);
 }
 #else
 #define zeus_power_off NULL
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index b9a9805e4828..dba6d0c1fc17 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -50,6 +50,7 @@ config MACH_REALVIEW_PB1176
  bool "Support RealView(R) Platform Baseboard for ARM1176JZF-S"
  select CPU_V6
  select ARM_GIC
+ select HAVE_TCM
  help
  Include support for the ARM(R) RealView(R) Platform Baseboard for
  ARM1176JZF-S.
diff --git a/arch/arm/mach-realview/platsmp.c b/arch/arm/mach-realview/platsmp.c
index 963bf0d8119a..4ae943bafa92 100644
--- a/arch/arm/mach-realview/platsmp.c
+++ b/arch/arm/mach-realview/platsmp.c
@@ -68,14 +68,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
  scu_enable(scu_base_addr());
 
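The identical deletion repeats for shmobile, tegra, ux500 and vexpress below: initialising the present map from max_cpus is now done once in the core ARM SMP code, so each platform hook keeps only its hardware-specific step. A sketch, under that assumption:

    /* formerly open-coded in every mach directory:
     *   for (i = 0; i < max_cpus; i++)
     *       set_cpu_present(i, true);
     * now left to the generic SMP core, leaving only: */
    void __init platform_smp_prepare_cpus(unsigned int max_cpus)
    {
        scu_enable(scu_base_addr()); /* enable the snoop control unit */
    }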
diff --git a/arch/arm/mach-s3c2412/pm.c b/arch/arm/mach-s3c2412/pm.c
index 752b13a7b3db..f4077efa51fa 100644
--- a/arch/arm/mach-s3c2412/pm.c
+++ b/arch/arm/mach-s3c2412/pm.c
@@ -37,12 +37,10 @@
 
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2412_cpu_suspend(void)
+static int s3c2412_cpu_suspend(unsigned long arg)
 {
  unsigned long tmp;
 
- flush_cache_all();
-
  /* set our standby method to sleep */
 
  tmp = __raw_readl(S3C2412_PWRCFG);
@@ -50,6 +48,8 @@ static void s3c2412_cpu_suspend(void)
  __raw_writel(tmp, S3C2412_PWRCFG);
 
  s3c2412_sleep_enter();
+
+ panic("sleep resumed to originator?");
 }
 
 static void s3c2412_pm_prepare(void)
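The added panic() documents the new contract: once converted to a cpu_suspend() finisher, this function must never return through the normal path, because a successful sleep resumes via cpu_resume. A control-flow sketch (names from the hunk above; the caller shown is assumed):

    static int s3c2412_cpu_suspend_sketch(unsigned long arg)
    {
        /* ... program PWRCFG for sleep ... */
        s3c2412_sleep_enter();                  /* no return on success */
        panic("sleep resumed to originator?");  /* unreachable by design */
    }

    /* caller: cpu_suspend(0, s3c2412_cpu_suspend_sketch);
     * after wakeup, execution continues after cpu_suspend(),
     * not after s3c2412_sleep_enter(). */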
diff --git a/arch/arm/mach-s3c2416/pm.c b/arch/arm/mach-s3c2416/pm.c
index 41db2b21e213..9ec54f1d8e75 100644
--- a/arch/arm/mach-s3c2416/pm.c
+++ b/arch/arm/mach-s3c2416/pm.c
@@ -24,10 +24,8 @@
 
 extern void s3c2412_sleep_enter(void);
 
-static void s3c2416_cpu_suspend(void)
+static int s3c2416_cpu_suspend(unsigned long arg)
 {
- flush_cache_all();
-
  /* enable wakeup sources regardless of battery state */
  __raw_writel(S3C2443_PWRCFG_SLEEP, S3C2443_PWRCFG);
 
@@ -35,6 +33,8 @@ static void s3c2416_cpu_suspend(void)
  __raw_writel(0x2BED, S3C2443_PWRMODE);
 
  s3c2412_sleep_enter();
+
+ panic("sleep resumed to originator?");
 }
 
 static void s3c2416_pm_prepare(void)
diff --git a/arch/arm/mach-s3c2440/mach-mini2440.c b/arch/arm/mach-s3c2440/mach-mini2440.c
index dd3120df09fe..fc2dc0b3d4fe 100644
--- a/arch/arm/mach-s3c2440/mach-mini2440.c
+++ b/arch/arm/mach-s3c2440/mach-mini2440.c
@@ -552,7 +552,7 @@ struct mini2440_features_t {
  struct platform_device *optional[8];
 };
 
-static void mini2440_parse_features(
+static void __init mini2440_parse_features(
  struct mini2440_features_t * features,
  const char * features_str )
 {
diff --git a/arch/arm/mach-s3c64xx/dev-spi.c b/arch/arm/mach-s3c64xx/dev-spi.c
index 82db072cb836..5e6b42089eb4 100644
--- a/arch/arm/mach-s3c64xx/dev-spi.c
+++ b/arch/arm/mach-s3c64xx/dev-spi.c
@@ -88,6 +88,7 @@ static struct s3c64xx_spi_info s3c64xx_spi0_pdata = {
  .cfg_gpio = s3c64xx_spi_cfg_gpio,
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 13,
+ .tx_st_done = 21,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -132,6 +133,7 @@ static struct s3c64xx_spi_info s3c64xx_spi1_pdata = {
  .cfg_gpio = s3c64xx_spi_cfg_gpio,
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 13,
+ .tx_st_done = 21,
 };
 
 struct platform_device s3c64xx_device_spi1 = {
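tx_st_done becomes platform data because the position of the "TX done" flag in the SPI status register differs per SoC: bit 21 here and on S5PC100, bit 25 on S5P64x0 and S5PV210 below. A hedged sketch of how a driver might consume it (the status register offset is an assumption for illustration):

    static bool tx_done(void __iomem *regs,
                        const struct s3c64xx_spi_info *info)
    {
        u32 status = readl(regs + 0x14);    /* STATUS offset: assumed */

        return status & (1u << info->tx_st_done);
    }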
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c
index b197171e7d03..204bfafe4bfc 100644
--- a/arch/arm/mach-s3c64xx/dma.c
+++ b/arch/arm/mach-s3c64xx/dma.c
@@ -113,7 +113,7 @@ found:
  return chan;
 }
 
-int s3c2410_dma_config(unsigned int channel, int xferunit)
+int s3c2410_dma_config(enum dma_ch channel, int xferunit)
 {
  struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -297,7 +297,7 @@ static int s3c64xx_dma_flush(struct s3c2410_dma_chan *chan)
  return 0;
 }
 
-int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
+int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
 {
  struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
 
@@ -331,7 +331,7 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
  *
  */
 
-int s3c2410_dma_enqueue(unsigned int channel, void *id,
+int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
  dma_addr_t data, int size)
 {
  struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -415,7 +415,7 @@ err_buff:
 EXPORT_SYMBOL(s3c2410_dma_enqueue);
 
 
-int s3c2410_dma_devconfig(unsigned int channel,
+int s3c2410_dma_devconfig(enum dma_ch channel,
  enum s3c2410_dmasrc source,
  unsigned long devaddr)
 {
@@ -463,7 +463,7 @@ int s3c2410_dma_devconfig(unsigned int channel,
 EXPORT_SYMBOL(s3c2410_dma_devconfig);
 
 
-int s3c2410_dma_getposition(unsigned int channel,
+int s3c2410_dma_getposition(enum dma_ch channel,
  dma_addr_t *src, dma_addr_t *dst)
 {
  struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
@@ -487,7 +487,7 @@ EXPORT_SYMBOL(s3c2410_dma_getposition);
  * get control of an dma channel
 */
 
-int s3c2410_dma_request(unsigned int channel,
+int s3c2410_dma_request(enum dma_ch channel,
  struct s3c2410_dma_client *client,
  void *dev)
 {
@@ -533,7 +533,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
  * allowed to go through.
 */
 
-int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
+int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
 {
  struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
  unsigned long flags;
539 unsigned long flags; 539 unsigned long flags;
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index bc1c470b7de6..8bad64370689 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -112,7 +112,7 @@ void s3c_pm_save_core(void)
  * this.
  */
 
-static void s3c64xx_cpu_suspend(void)
+static int s3c64xx_cpu_suspend(unsigned long arg)
 {
  unsigned long tmp;
 
diff --git a/arch/arm/mach-s3c64xx/sleep.S b/arch/arm/mach-s3c64xx/sleep.S
index 1f87732b2320..34313f9c8792 100644
--- a/arch/arm/mach-s3c64xx/sleep.S
+++ b/arch/arm/mach-s3c64xx/sleep.S
@@ -25,29 +25,6 @@
 
  .text
 
- /* s3c_cpu_save
- *
- * Save enough processor state to allow the restart of the pm.c
- * code after resume.
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
- stmfd sp!, { r4 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- @@ call final suspend code
- ldr r0, =pm_cpu_sleep
- ldr pc, [r0]
-
- @@ return to the caller, after the MMU is turned on.
- @@ restore the last bits of the stack and return.
-resume_with_mmu:
- ldmfd sp!, { r4 - r12, pc } @ return, from sp from s3c_cpu_save
-
  /* Sleep magic, the word before the resume entry point so that the
  * bootloader can check for a resumeable image. */
 
diff --git a/arch/arm/mach-s5p64x0/dev-spi.c b/arch/arm/mach-s5p64x0/dev-spi.c
index e78ee18c76e3..ac825e826326 100644
--- a/arch/arm/mach-s5p64x0/dev-spi.c
+++ b/arch/arm/mach-s5p64x0/dev-spi.c
@@ -112,12 +112,14 @@ static struct s3c64xx_spi_info s5p6440_spi0_pdata = {
  .cfg_gpio = s5p6440_spi_cfg_gpio,
  .fifo_lvl_mask = 0x1ff,
  .rx_lvl_offset = 15,
+ .tx_st_done = 25,
 };
 
 static struct s3c64xx_spi_info s5p6450_spi0_pdata = {
  .cfg_gpio = s5p6450_spi_cfg_gpio,
  .fifo_lvl_mask = 0x1ff,
  .rx_lvl_offset = 15,
+ .tx_st_done = 25,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -160,12 +162,14 @@ static struct s3c64xx_spi_info s5p6440_spi1_pdata = {
  .cfg_gpio = s5p6440_spi_cfg_gpio,
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 15,
+ .tx_st_done = 25,
 };
 
 static struct s3c64xx_spi_info s5p6450_spi1_pdata = {
  .cfg_gpio = s5p6450_spi_cfg_gpio,
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 15,
+ .tx_st_done = 25,
 };
 
 struct platform_device s5p64x0_device_spi1 = {
diff --git a/arch/arm/mach-s5pc100/dev-spi.c b/arch/arm/mach-s5pc100/dev-spi.c
index 57b19794d9bb..e5d6c4dceb56 100644
--- a/arch/arm/mach-s5pc100/dev-spi.c
+++ b/arch/arm/mach-s5pc100/dev-spi.c
@@ -15,6 +15,7 @@
 #include <mach/dma.h>
 #include <mach/map.h>
 #include <mach/spi-clocks.h>
+#include <mach/irqs.h>
 
 #include <plat/s3c64xx-spi.h>
 #include <plat/gpio-cfg.h>
@@ -90,6 +91,7 @@ static struct s3c64xx_spi_info s5pc100_spi0_pdata = {
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 13,
  .high_speed = 1,
+ .tx_st_done = 21,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -134,6 +136,7 @@ static struct s3c64xx_spi_info s5pc100_spi1_pdata = {
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 13,
  .high_speed = 1,
+ .tx_st_done = 21,
 };
 
 struct platform_device s5pc100_device_spi1 = {
@@ -176,6 +179,7 @@ static struct s3c64xx_spi_info s5pc100_spi2_pdata = {
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 13,
  .high_speed = 1,
+ .tx_st_done = 21,
 };
 
 struct platform_device s5pc100_device_spi2 = {
diff --git a/arch/arm/mach-s5pv210/dev-spi.c b/arch/arm/mach-s5pv210/dev-spi.c
index e3249a47e3b1..eaf9a7bff7a0 100644
--- a/arch/arm/mach-s5pv210/dev-spi.c
+++ b/arch/arm/mach-s5pv210/dev-spi.c
@@ -85,6 +85,7 @@ static struct s3c64xx_spi_info s5pv210_spi0_pdata = {
  .fifo_lvl_mask = 0x1ff,
  .rx_lvl_offset = 15,
  .high_speed = 1,
+ .tx_st_done = 25,
 };
 
 static u64 spi_dmamask = DMA_BIT_MASK(32);
@@ -129,6 +130,7 @@ static struct s3c64xx_spi_info s5pv210_spi1_pdata = {
  .fifo_lvl_mask = 0x7f,
  .rx_lvl_offset = 15,
  .high_speed = 1,
+ .tx_st_done = 25,
 };
 
 struct platform_device s5pv210_device_spi1 = {
diff --git a/arch/arm/mach-s5pv210/pm.c b/arch/arm/mach-s5pv210/pm.c
index 24febae3d4c0..309e388a8a83 100644
--- a/arch/arm/mach-s5pv210/pm.c
+++ b/arch/arm/mach-s5pv210/pm.c
@@ -88,7 +88,7 @@ static struct sleep_save s5pv210_core_save[] = {
  SAVE_ITEM(S3C2410_TCNTO(0)),
 };
 
-void s5pv210_cpu_suspend(void)
+void s5pv210_cpu_suspend(unsigned long arg)
 {
  unsigned long tmp;
 
diff --git a/arch/arm/mach-s5pv210/sleep.S b/arch/arm/mach-s5pv210/sleep.S
index a3d649466fb1..e3452ccd4b08 100644
--- a/arch/arm/mach-s5pv210/sleep.S
+++ b/arch/arm/mach-s5pv210/sleep.S
@@ -32,27 +32,6 @@
 
  .text
 
- /* s3c_cpu_save
- *
- * entry:
- * r1 = v:p offset
- */
-
-ENTRY(s3c_cpu_save)
-
- stmfd sp!, { r3 - r12, lr }
- ldr r3, =resume_with_mmu
- bl cpu_suspend
-
- ldr r0, =pm_cpu_sleep
- ldr r0, [ r0 ]
- mov pc, r0
-
-resume_with_mmu:
- ldmfd sp!, { r3 - r12, pc }
-
- .ltorg
-
  /* sleep magic, to allow the bootloader to check for an valid
  * image to resume to. Must be the first word before the
  * s3c_cpu_resume entry.
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index c4661aab22fb..bf85b8b259d5 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -29,10 +29,11 @@
 
 #include <mach/hardware.h>
 #include <asm/memory.h>
+#include <asm/suspend.h>
 #include <asm/system.h>
 #include <asm/mach/time.h>
 
-extern void sa1100_cpu_suspend(long);
+extern int sa1100_finish_suspend(unsigned long);
 
 #define SAVE(x) sleep_save[SLEEP_SAVE_##x] = x
 #define RESTORE(x) x = sleep_save[SLEEP_SAVE_##x]
@@ -75,9 +76,7 @@ static int sa11x0_pm_enter(suspend_state_t state)
  PSPR = virt_to_phys(cpu_resume);
 
  /* go zzz */
- sa1100_cpu_suspend(PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
- cpu_init();
+ cpu_suspend(0, sa1100_finish_suspend);
 
  /*
  * Ensure not to come back here if it wasn't intended
diff --git a/arch/arm/mach-sa1100/sleep.S b/arch/arm/mach-sa1100/sleep.S
index 04f2a618d4ef..e8223315b442 100644
--- a/arch/arm/mach-sa1100/sleep.S
+++ b/arch/arm/mach-sa1100/sleep.S
@@ -22,18 +22,13 @@
 
  .text
 /*
- * sa1100_cpu_suspend()
+ * sa1100_finish_suspend()
  *
  * Causes sa11x0 to enter sleep state
  *
  */
 
-ENTRY(sa1100_cpu_suspend)
- stmfd sp!, {r4 - r12, lr} @ save registers on stack
- mov r1, r0
- ldr r3, =sa1100_cpu_resume @ return function
- bl cpu_suspend
-
+ENTRY(sa1100_finish_suspend)
  @ disable clock switching
  mcr p15, 0, r1, c15, c2, 2
 
@@ -139,13 +134,3 @@ sa1110_sdram_controller_fix:
  str r13, [r12]
 
 20: b 20b @ loop waiting for sleep
-
-/*
- * cpu_sa1100_resume()
- *
- * entry point from bootloader into kernel during resume
- */
- .align 5
-sa1100_cpu_resume:
- mcr p15, 0, r1, c15, c1, 2 @ enable clock switching
- ldmfd sp!, {r4 - r12, pc} @ return to caller
diff --git a/arch/arm/mach-shark/include/mach/entry-macro.S b/arch/arm/mach-shark/include/mach/entry-macro.S
index e2853c0a3333..0bb6cc626eb7 100644
--- a/arch/arm/mach-shark/include/mach/entry-macro.S
+++ b/arch/arm/mach-shark/include/mach/entry-macro.S
@@ -11,17 +11,17 @@
  .endm
 
  .macro get_irqnr_preamble, base, tmp
+ mov \base, #0xe0000000
  .endm
 
  .macro arch_ret_to_user, tmp1, tmp2
  .endm
 
  .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
- mov r4, #0xe0000000
 
  mov \irqstat, #0x0C
- strb \irqstat, [r4, #0x20] @outb(0x0C, 0x20) /* Poll command */
- ldrb \irqnr, [r4, #0x20] @irq = inb(0x20) & 7
+ strb \irqstat, [\base, #0x20] @outb(0x0C, 0x20) /* Poll command */
+ ldrb \irqnr, [\base, #0x20] @irq = inb(0x20) & 7
  and \irqstat, \irqnr, #0x80
  teq \irqstat, #0
  beq 43f
@@ -29,8 +29,8 @@
  teq \irqnr, #2
  bne 44f
 43: mov \irqstat, #0x0C
- strb \irqstat, [r4, #0xa0] @outb(0x0C, 0xA0) /* Poll command */
- ldrb \irqnr, [r4, #0xa0] @irq = (inb(0xA0) & 7) + 8
+ strb \irqstat, [\base, #0xa0] @outb(0x0C, 0xA0) /* Poll command */
+ ldrb \irqnr, [\base, #0xa0] @irq = (inb(0xA0) & 7) + 8
  and \irqstat, \irqnr, #0x80
  teq \irqstat, #0
  beq 44f
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
new file mode 100644
index 000000000000..4a81b01f1e8f
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sdhi-sh7372.h
@@ -0,0 +1,21 @@
+#ifndef SDHI_SH7372_H
+#define SDHI_SH7372_H
+
+#define SDGENCNTA 0xfe40009c
+
+/* The countdown of SDGENCNTA is controlled by
+ * ZB3D2CLK which runs at 149.5MHz.
+ * That is 149.5ticks/us. Approximate this as 150ticks/us.
+ */
+static void udelay(int us)
+{
+ __raw_writel(us * 150, SDGENCNTA);
+ while(__raw_readl(SDGENCNTA)) ;
+}
+
+static void msleep(int ms)
+{
+ udelay(ms * 1000);
+}
+
+#endif
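The 150 ticks/us constant rounds the 149.5 MHz ZB3D2CLK rate up, so the busy-wait comes out slightly long rather than short. A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int us = 20;
        double actual_us = us * 150 / 149.5; /* ticks / (ticks per us) */
        printf("requested %d us -> actual %.2f us\n", us, actual_us);
        return 0; /* requested 20 us -> actual 20.07 us */
    }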
diff --git a/arch/arm/mach-shmobile/include/mach/sdhi.h b/arch/arm/mach-shmobile/include/mach/sdhi.h
new file mode 100644
index 000000000000..0ec9e69f2c3b
--- /dev/null
+++ b/arch/arm/mach-shmobile/include/mach/sdhi.h
@@ -0,0 +1,16 @@
+#ifndef SDHI_H
+#define SDHI_H
+
+/**************************************************
+ *
+ * CPU specific settings
+ *
+ **************************************************/
+
+#ifdef CONFIG_ARCH_SH7372
+#include "mach/sdhi-sh7372.h"
+#else
+#error "unsupported CPU."
+#endif
+
+#endif /* SDHI_H */
diff --git a/arch/arm/mach-shmobile/platsmp.c b/arch/arm/mach-shmobile/platsmp.c
index f3888feb1c68..66f980625a33 100644
--- a/arch/arm/mach-shmobile/platsmp.c
+++ b/arch/arm/mach-shmobile/platsmp.c
@@ -64,10 +64,5 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
- int i;
-
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
  shmobile_smp_prepare_cpus();
 }
diff --git a/arch/arm/mach-tegra/platsmp.c b/arch/arm/mach-tegra/platsmp.c
index b8ae3c978dee..1a594dce8fbc 100644
--- a/arch/arm/mach-tegra/platsmp.c
+++ b/arch/arm/mach-tegra/platsmp.c
@@ -129,14 +129,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
  scu_enable(scu_base);
 }
diff --git a/arch/arm/mach-ux500/platsmp.c b/arch/arm/mach-ux500/platsmp.c
index 0c527fe2cebb..a33df5f4c27a 100644
--- a/arch/arm/mach-ux500/platsmp.c
+++ b/arch/arm/mach-ux500/platsmp.c
@@ -172,14 +172,6 @@ void __init smp_init_cpus(void)
 
 void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 {
- int i;
-
- /*
- * Initialise the present map, which describes the set of CPUs
- * actually populated at the present time.
- */
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
  scu_enable(scu_base_addr());
  wakeup_secondary();
diff --git a/arch/arm/mach-vexpress/ct-ca9x4.c b/arch/arm/mach-vexpress/ct-ca9x4.c
index 765a71ff7f3b..bfd32f52c2db 100644
--- a/arch/arm/mach-vexpress/ct-ca9x4.c
+++ b/arch/arm/mach-vexpress/ct-ca9x4.c
@@ -229,10 +229,6 @@ static void ct_ca9x4_init_cpu_map(void)
 
 static void ct_ca9x4_smp_enable(unsigned int max_cpus)
 {
- int i;
- for (i = 0; i < max_cpus; i++)
- set_cpu_present(i, true);
-
  scu_enable(MMIO_P2V(A9_MPCORE_SCU));
 }
 #endif
diff --git a/arch/arm/mach-vt8500/irq.c b/arch/arm/mach-vt8500/irq.c
index 245140c0df10..642de0408f25 100644
--- a/arch/arm/mach-vt8500/irq.c
+++ b/arch/arm/mach-vt8500/irq.c
@@ -39,9 +39,10 @@
 static void __iomem *ic_regbase;
 static void __iomem *sic_regbase;
 
-static void vt8500_irq_mask(unsigned int irq)
+static void vt8500_irq_mask(struct irq_data *d)
 {
  void __iomem *base = ic_regbase;
+ unsigned irq = d->irq;
  u8 edge;
 
  if (irq >= 64) {
@@ -64,9 +65,10 @@ static void vt8500_irq_mask(unsigned int irq)
  }
 }
 
-static void vt8500_irq_unmask(unsigned int irq)
+static void vt8500_irq_unmask(struct irq_data *d)
 {
  void __iomem *base = ic_regbase;
+ unsigned irq = d->irq;
  u8 dctr;
 
  if (irq >= 64) {
@@ -78,10 +80,11 @@ static void vt8500_irq_unmask(unsigned int irq)
  writeb(dctr, base + VT8500_IC_DCTR + irq);
 }
 
-static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int vt8500_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
  void __iomem *base = ic_regbase;
- unsigned int orig_irq = irq;
+ unsigned irq = d->irq;
+ unsigned orig_irq = irq;
  u8 dctr;
 
  if (irq >= 64) {
@@ -114,11 +117,11 @@ static int vt8500_irq_set_type(unsigned int irq, unsigned int flow_type)
 }
 
 static struct irq_chip vt8500_irq_chip = {
  .name = "vt8500",
- .ack = vt8500_irq_mask,
- .mask = vt8500_irq_mask,
- .unmask = vt8500_irq_unmask,
- .set_type = vt8500_irq_set_type,
+ .irq_ack = vt8500_irq_mask,
+ .irq_mask = vt8500_irq_mask,
+ .irq_unmask = vt8500_irq_unmask,
+ .irq_set_type = vt8500_irq_set_type,
 };
 
 void __init vt8500_init_irq(void)
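This is the standard genirq conversion: callbacks take a struct irq_data * and the chip is wired up through the irq_-prefixed members. Skeleton of the pattern (chip name and mask logic hypothetical):

    #include <linux/irq.h>

    static void my_irq_mask(struct irq_data *d)
    {
        unsigned int irq = d->irq;  /* recover the Linux IRQ number */

        /* ... write the per-line mask register for 'irq' ... */
    }

    static struct irq_chip my_irq_chip = {
        .name     = "my-ic",
        .irq_ack  = my_irq_mask,
        .irq_mask = my_irq_mask,
    };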
diff --git a/arch/arm/mm/abort-ev4.S b/arch/arm/mm/abort-ev4.S
index 4f18f9e87bae..54473cd4aba9 100644
--- a/arch/arm/mm/abort-ev4.S
+++ b/arch/arm/mm/abort-ev4.S
@@ -3,14 +3,11 @@
 /*
  * Function: v4_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -21,10 +18,8 @@
 ENTRY(v4_early_abort)
  mrc p15, 0, r1, c5, c0, 0 @ get FSR
  mrc p15, 0, r0, c6, c0, 0 @ get FAR
- ldr r3, [r2] @ read aborted ARM instruction
+ ldr r3, [r4] @ read aborted ARM instruction
  bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
  tst r3, #1 << 20 @ L = 1 -> write?
  orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
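Every abort stub in this and the following hunks now ends with "b do_DataAbort" instead of returning into the entry macro. The registers it prepares match the C handler's signature in arch/arm/mm/fault.c:

    asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
                                 struct pt_regs *regs);

r0 carries the faulting address (FAR), r1 the fault status (FSR, with the write bit folded in at bit 11), and r2 the pt_regs pointer, which is why r2 is now kept intact and the aborted pc/psr live in r4/r5.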
diff --git a/arch/arm/mm/abort-ev4t.S b/arch/arm/mm/abort-ev4t.S
index b6282548f922..9da704e7b86e 100644
--- a/arch/arm/mm/abort-ev4t.S
+++ b/arch/arm/mm/abort-ev4t.S
@@ -4,14 +4,11 @@
 /*
  * Function: v4t_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -22,9 +19,9 @@
 ENTRY(v4t_early_abort)
  mrc p15, 0, r1, c5, c0, 0 @ get FSR
  mrc p15, 0, r0, c6, c0, 0 @ get FAR
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
  bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
  tst r3, #1 << 20 @ check write
  orreq r1, r1, #1 << 11
- mov pc, lr
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5t.S b/arch/arm/mm/abort-ev5t.S
index 02251b526c0d..a0908d4653a3 100644
--- a/arch/arm/mm/abort-ev5t.S
+++ b/arch/arm/mm/abort-ev5t.S
@@ -4,14 +4,11 @@
 /*
  * Function: v5t_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -22,10 +19,10 @@
 ENTRY(v5t_early_abort)
  mrc p15, 0, r1, c5, c0, 0 @ get FSR
  mrc p15, 0, r0, c6, c0, 0 @ get FAR
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
  bic r1, r1, #1 << 11 @ clear bits 11 of FSR
- do_ldrd_abort
+ do_ldrd_abort tmp=ip, insn=r3
  tst r3, #1 << 20 @ check write
  orreq r1, r1, #1 << 11
- mov pc, lr
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev5tj.S b/arch/arm/mm/abort-ev5tj.S
index bce68d601c8b..4006b7a61264 100644
--- a/arch/arm/mm/abort-ev5tj.S
+++ b/arch/arm/mm/abort-ev5tj.S
@@ -4,14 +4,11 @@
 /*
  * Function: v5tj_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -23,13 +20,11 @@ ENTRY(v5tj_early_abort)
  mrc p15, 0, r1, c5, c0, 0 @ get FSR
  mrc p15, 0, r0, c6, c0, 0 @ get FAR
  bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
- tst r3, #PSR_J_BIT @ Java?
- movne pc, lr
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
- do_ldrd_abort
+ tst r5, #PSR_J_BIT @ Java?
+ bne do_DataAbort
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
+ do_ldrd_abort tmp=ip, insn=r3
  tst r3, #1 << 20 @ L = 0 -> write
  orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 1478aa522144..ff1f7cc11f87 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -4,14 +4,11 @@
 /*
  * Function: v6_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -33,16 +30,14 @@ ENTRY(v6_early_abort)
  * The test below covers all the write situations, including Java bytecodes
  */
  bic r1, r1, #1 << 11 @ clear bit 11 of FSR
- tst r3, #PSR_J_BIT @ Java?
- movne pc, lr
- do_thumb_abort
- ldreq r3, [r2] @ read aborted ARM instruction
+ tst r5, #PSR_J_BIT @ Java?
+ bne do_DataAbort
+ do_thumb_abort fsr=r1, pc=r4, psr=r5, tmp=r3
+ ldreq r3, [r4] @ read aborted ARM instruction
 #ifdef CONFIG_CPU_ENDIAN_BE8
  reveq r3, r3
 #endif
- do_ldrd_abort
+ do_ldrd_abort tmp=ip, insn=r3
  tst r3, #1 << 20 @ L = 0 -> write
  orreq r1, r1, #1 << 11 @ yes.
- mov pc, lr
-
-
+ b do_DataAbort
diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S
index ec88b157d3bb..703375277ba6 100644
--- a/arch/arm/mm/abort-ev7.S
+++ b/arch/arm/mm/abort-ev7.S
@@ -3,14 +3,11 @@
 /*
  * Function: v7_early_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4 - r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  */
@@ -37,18 +34,18 @@ ENTRY(v7_early_abort)
  ldr r3, =0x40d @ On permission fault
  and r3, r1, r3
  cmp r3, #0x0d
- movne pc, lr
+ bne do_DataAbort
 
  mcr p15, 0, r0, c7, c8, 0 @ Retranslate FAR
  isb
- mrc p15, 0, r2, c7, c4, 0 @ Read the PAR
- and r3, r2, #0x7b @ On translation fault
+ mrc p15, 0, ip, c7, c4, 0 @ Read the PAR
+ and r3, ip, #0x7b @ On translation fault
  cmp r3, #0x0b
- movne pc, lr
+ bne do_DataAbort
  bic r1, r1, #0xf @ Fix up FSR FS[5:0]
- and r2, r2, #0x7e
- orr r1, r1, r2, LSR #1
+ and ip, ip, #0x7e
+ orr r1, r1, ip, LSR #1
 #endif
 
- mov pc, lr
+ b do_DataAbort
 ENDPROC(v7_early_abort)
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index 9fb7b0e25ea1..f3982580c273 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -3,14 +3,11 @@
 /*
  * Function: v4t_late_abort
  *
- * Params : r2 = address of aborted instruction
- * : r3 = saved SPSR
+ * Params : r2 = pt_regs
+ * : r4 = aborted context pc
+ * : r5 = aborted context psr
  *
- * Returns : r0 = address of abort
- * : r1 = FSR, bit 11 = write
- * : r2-r8 = corrupted
- * : r9 = preserved
- * : sp = pointer to registers
+ * Returns : r4-r5, r10-r11, r13 preserved
  *
  * Purpose : obtain information about current aborted instruction.
  * Note: we read user space. This means we might cause a data
@@ -18,7 +15,7 @@
18 * picture. Unfortunately, this does happen. We live with it. 15 * picture. Unfortunately, this does happen. We live with it.
19 */ 16 */
20ENTRY(v4t_late_abort) 17ENTRY(v4t_late_abort)
21 tst r3, #PSR_T_BIT @ check for thumb mode 18 tst r5, #PSR_T_BIT @ check for thumb mode
22#ifdef CONFIG_CPU_CP15_MMU 19#ifdef CONFIG_CPU_CP15_MMU
23 mrc p15, 0, r1, c5, c0, 0 @ get FSR 20 mrc p15, 0, r1, c5, c0, 0 @ get FSR
24 mrc p15, 0, r0, c6, c0, 0 @ get FAR 21 mrc p15, 0, r0, c6, c0, 0 @ get FAR
@@ -28,7 +25,7 @@ ENTRY(v4t_late_abort)
28 mov r1, #0 25 mov r1, #0
29#endif 26#endif
30 bne .data_thumb_abort 27 bne .data_thumb_abort
31 ldr r8, [r2] @ read arm instruction 28 ldr r8, [r4] @ read arm instruction
32 tst r8, #1 << 20 @ L = 1 -> write? 29 tst r8, #1 << 20 @ L = 1 -> write?
33 orreq r1, r1, #1 << 11 @ yes. 30 orreq r1, r1, #1 << 11 @ yes.
34 and r7, r8, #15 << 24 31 and r7, r8, #15 << 24
@@ -47,86 +44,84 @@ ENTRY(v4t_late_abort)
47/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> 44/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
48/* a */ b .data_unknown 45/* a */ b .data_unknown
49/* b */ b .data_unknown 46/* b */ b .data_unknown
50/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m 47/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
51/* d */ mov pc, lr @ ldc rd, [rn, #m] 48/* d */ b do_DataAbort @ ldc rd, [rn, #m]
52/* e */ b .data_unknown 49/* e */ b .data_unknown
53/* f */ 50/* f */
54.data_unknown: @ Part of jumptable 51.data_unknown: @ Part of jumptable
55 mov r0, r2 52 mov r0, r4
56 mov r1, r8 53 mov r1, r8
57 mov r2, sp 54 b baddataabort
58 bl baddataabort
59 b ret_from_exception
60 55
61.data_arm_ldmstm: 56.data_arm_ldmstm:
62 tst r8, #1 << 21 @ check writeback bit 57 tst r8, #1 << 21 @ check writeback bit
63 moveq pc, lr @ no writeback -> no fixup 58 beq do_DataAbort @ no writeback -> no fixup
64 mov r7, #0x11 59 mov r7, #0x11
65 orr r7, r7, #0x1100 60 orr r7, r7, #0x1100
66 and r6, r8, r7 61 and r6, r8, r7
67 and r2, r8, r7, lsl #1 62 and r9, r8, r7, lsl #1
68 add r6, r6, r2, lsr #1 63 add r6, r6, r9, lsr #1
69 and r2, r8, r7, lsl #2 64 and r9, r8, r7, lsl #2
70 add r6, r6, r2, lsr #2 65 add r6, r6, r9, lsr #2
71 and r2, r8, r7, lsl #3 66 and r9, r8, r7, lsl #3
72 add r6, r6, r2, lsr #3 67 add r6, r6, r9, lsr #3
73 add r6, r6, r6, lsr #8 68 add r6, r6, r6, lsr #8
74 add r6, r6, r6, lsr #4 69 add r6, r6, r6, lsr #4
75 and r6, r6, #15 @ r6 = no. of registers to transfer. 70 and r6, r6, #15 @ r6 = no. of registers to transfer.
76 and r5, r8, #15 << 16 @ Extract 'n' from instruction 71 and r9, r8, #15 << 16 @ Extract 'n' from instruction
77 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 72 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
78 tst r8, #1 << 23 @ Check U bit 73 tst r8, #1 << 23 @ Check U bit
79 subne r7, r7, r6, lsl #2 @ Undo increment 74 subne r7, r7, r6, lsl #2 @ Undo increment
80 addeq r7, r7, r6, lsl #2 @ Undo decrement 75 addeq r7, r7, r6, lsl #2 @ Undo decrement
81 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 76 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
82 mov pc, lr 77 b do_DataAbort
83 78
84.data_arm_lateldrhpre: 79.data_arm_lateldrhpre:
85 tst r8, #1 << 21 @ Check writeback bit 80 tst r8, #1 << 21 @ Check writeback bit
86 moveq pc, lr @ No writeback -> no fixup 81 beq do_DataAbort @ No writeback -> no fixup
87.data_arm_lateldrhpost: 82.data_arm_lateldrhpost:
88 and r5, r8, #0x00f @ get Rm / low nibble of immediate value 83 and r9, r8, #0x00f @ get Rm / low nibble of immediate value
89 tst r8, #1 << 22 @ if (immediate offset) 84 tst r8, #1 << 22 @ if (immediate offset)
90 andne r6, r8, #0xf00 @ { immediate high nibble 85 andne r6, r8, #0xf00 @ { immediate high nibble
91 orrne r6, r5, r6, lsr #4 @ combine nibbles } else 86 orrne r6, r9, r6, lsr #4 @ combine nibbles } else
92 ldreq r6, [sp, r5, lsl #2] @ { load Rm value } 87 ldreq r6, [r2, r9, lsl #2] @ { load Rm value }
93.data_arm_apply_r6_and_rn: 88.data_arm_apply_r6_and_rn:
94 and r5, r8, #15 << 16 @ Extract 'n' from instruction 89 and r9, r8, #15 << 16 @ Extract 'n' from instruction
95 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 90 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
96 tst r8, #1 << 23 @ Check U bit 91 tst r8, #1 << 23 @ Check U bit
97 subne r7, r7, r6 @ Undo increment 92 subne r7, r7, r6 @ Undo increment
98 addeq r7, r7, r6 @ Undo decrement 93 addeq r7, r7, r6 @ Undo decrement
99 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 94 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
100 mov pc, lr 95 b do_DataAbort
101 96
102.data_arm_lateldrpreconst: 97.data_arm_lateldrpreconst:
103 tst r8, #1 << 21 @ check writeback bit 98 tst r8, #1 << 21 @ check writeback bit
104 moveq pc, lr @ no writeback -> no fixup 99 beq do_DataAbort @ no writeback -> no fixup
105.data_arm_lateldrpostconst: 100.data_arm_lateldrpostconst:
106 movs r2, r8, lsl #20 @ Get offset 101 movs r6, r8, lsl #20 @ Get offset
107 moveq pc, lr @ zero -> no fixup 102 beq do_DataAbort @ zero -> no fixup
108 and r5, r8, #15 << 16 @ Extract 'n' from instruction 103 and r9, r8, #15 << 16 @ Extract 'n' from instruction
109 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 104 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
110 tst r8, #1 << 23 @ Check U bit 105 tst r8, #1 << 23 @ Check U bit
111 subne r7, r7, r2, lsr #20 @ Undo increment 106 subne r7, r7, r6, lsr #20 @ Undo increment
112 addeq r7, r7, r2, lsr #20 @ Undo decrement 107 addeq r7, r7, r6, lsr #20 @ Undo decrement
113 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 108 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
114 mov pc, lr 109 b do_DataAbort
115 110
116.data_arm_lateldrprereg: 111.data_arm_lateldrprereg:
117 tst r8, #1 << 21 @ check writeback bit 112 tst r8, #1 << 21 @ check writeback bit
118 moveq pc, lr @ no writeback -> no fixup 113 beq do_DataAbort @ no writeback -> no fixup
119.data_arm_lateldrpostreg: 114.data_arm_lateldrpostreg:
120 and r7, r8, #15 @ Extract 'm' from instruction 115 and r7, r8, #15 @ Extract 'm' from instruction
121 ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' 116 ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
122 mov r5, r8, lsr #7 @ get shift count 117 mov r9, r8, lsr #7 @ get shift count
123 ands r5, r5, #31 118 ands r9, r9, #31
124 and r7, r8, #0x70 @ get shift type 119 and r7, r8, #0x70 @ get shift type
125 orreq r7, r7, #8 @ shift count = 0 120 orreq r7, r7, #8 @ shift count = 0
126 add pc, pc, r7 121 add pc, pc, r7
127 nop 122 nop
128 123
129 mov r6, r6, lsl r5 @ 0: LSL #!0 124 mov r6, r6, lsl r9 @ 0: LSL #!0
130 b .data_arm_apply_r6_and_rn 125 b .data_arm_apply_r6_and_rn
131 b .data_arm_apply_r6_and_rn @ 1: LSL #0 126 b .data_arm_apply_r6_and_rn @ 1: LSL #0
132 nop 127 nop
@@ -134,7 +129,7 @@ ENTRY(v4t_late_abort)
134 nop 129 nop
135 b .data_unknown @ 3: MUL? 130 b .data_unknown @ 3: MUL?
136 nop 131 nop
137 mov r6, r6, lsr r5 @ 4: LSR #!0 132 mov r6, r6, lsr r9 @ 4: LSR #!0
138 b .data_arm_apply_r6_and_rn 133 b .data_arm_apply_r6_and_rn
139 mov r6, r6, lsr #32 @ 5: LSR #32 134 mov r6, r6, lsr #32 @ 5: LSR #32
140 b .data_arm_apply_r6_and_rn 135 b .data_arm_apply_r6_and_rn
@@ -142,7 +137,7 @@ ENTRY(v4t_late_abort)
142 nop 137 nop
143 b .data_unknown @ 7: MUL? 138 b .data_unknown @ 7: MUL?
144 nop 139 nop
145 mov r6, r6, asr r5 @ 8: ASR #!0 140 mov r6, r6, asr r9 @ 8: ASR #!0
146 b .data_arm_apply_r6_and_rn 141 b .data_arm_apply_r6_and_rn
147 mov r6, r6, asr #32 @ 9: ASR #32 142 mov r6, r6, asr #32 @ 9: ASR #32
148 b .data_arm_apply_r6_and_rn 143 b .data_arm_apply_r6_and_rn
@@ -150,7 +145,7 @@ ENTRY(v4t_late_abort)
150 nop 145 nop
151 b .data_unknown @ B: MUL? 146 b .data_unknown @ B: MUL?
152 nop 147 nop
153 mov r6, r6, ror r5 @ C: ROR #!0 148 mov r6, r6, ror r9 @ C: ROR #!0
154 b .data_arm_apply_r6_and_rn 149 b .data_arm_apply_r6_and_rn
155 mov r6, r6, rrx @ D: RRX 150 mov r6, r6, rrx @ D: RRX
156 b .data_arm_apply_r6_and_rn 151 b .data_arm_apply_r6_and_rn
@@ -159,7 +154,7 @@ ENTRY(v4t_late_abort)
159 b .data_unknown @ F: MUL? 154 b .data_unknown @ F: MUL?
160 155
161.data_thumb_abort: 156.data_thumb_abort:
162 ldrh r8, [r2] @ read instruction 157 ldrh r8, [r4] @ read instruction
163 tst r8, #1 << 11 @ L = 1 -> write? 158 tst r8, #1 << 11 @ L = 1 -> write?
164 orreq r1, r1, #1 << 8 @ yes 159 orreq r1, r1, #1 << 8 @ yes
165 and r7, r8, #15 << 12 160 and r7, r8, #15 << 12
@@ -172,10 +167,10 @@ ENTRY(v4t_late_abort)
172/* 3 */ b .data_unknown 167/* 3 */ b .data_unknown
173/* 4 */ b .data_unknown 168/* 4 */ b .data_unknown
174/* 5 */ b .data_thumb_reg 169/* 5 */ b .data_thumb_reg
175/* 6 */ mov pc, lr 170/* 6 */ b do_DataAbort
176/* 7 */ mov pc, lr 171/* 7 */ b do_DataAbort
177/* 8 */ mov pc, lr 172/* 8 */ b do_DataAbort
178/* 9 */ mov pc, lr 173/* 9 */ b do_DataAbort
179/* A */ b .data_unknown 174/* A */ b .data_unknown
180/* B */ b .data_thumb_pushpop 175/* B */ b .data_thumb_pushpop
181/* C */ b .data_thumb_ldmstm 176/* C */ b .data_thumb_ldmstm
@@ -185,41 +180,41 @@ ENTRY(v4t_late_abort)
185 180
186.data_thumb_reg: 181.data_thumb_reg:
187 tst r8, #1 << 9 182 tst r8, #1 << 9
188 moveq pc, lr 183 beq do_DataAbort
189 tst r8, #1 << 10 @ If 'S' (signed) bit is set 184 tst r8, #1 << 10 @ If 'S' (signed) bit is set
190 movne r1, #0 @ it must be a load instr 185 movne r1, #0 @ it must be a load instr
191 mov pc, lr 186 b do_DataAbort
192 187
193.data_thumb_pushpop: 188.data_thumb_pushpop:
194 tst r8, #1 << 10 189 tst r8, #1 << 10
195 beq .data_unknown 190 beq .data_unknown
196 and r6, r8, #0x55 @ hweight8(r8) + R bit 191 and r6, r8, #0x55 @ hweight8(r8) + R bit
197 and r2, r8, #0xaa 192 and r9, r8, #0xaa
198 add r6, r6, r2, lsr #1 193 add r6, r6, r9, lsr #1
199 and r2, r6, #0xcc 194 and r9, r6, #0xcc
200 and r6, r6, #0x33 195 and r6, r6, #0x33
201 add r6, r6, r2, lsr #2 196 add r6, r6, r9, lsr #2
202 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit) 197 movs r7, r8, lsr #9 @ C = r8 bit 8 (R bit)
203 adc r6, r6, r6, lsr #4 @ high + low nibble + R bit 198 adc r6, r6, r6, lsr #4 @ high + low nibble + R bit
204 and r6, r6, #15 @ number of regs to transfer 199 and r6, r6, #15 @ number of regs to transfer
205 ldr r7, [sp, #13 << 2] 200 ldr r7, [r2, #13 << 2]
206 tst r8, #1 << 11 201 tst r8, #1 << 11
207 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH 202 addeq r7, r7, r6, lsl #2 @ increment SP if PUSH
208 subne r7, r7, r6, lsl #2 @ decrement SP if POP 203 subne r7, r7, r6, lsl #2 @ decrement SP if POP
209 str r7, [sp, #13 << 2] 204 str r7, [r2, #13 << 2]
210 mov pc, lr 205 b do_DataAbort
211 206
212.data_thumb_ldmstm: 207.data_thumb_ldmstm:
213 and r6, r8, #0x55 @ hweight8(r8) 208 and r6, r8, #0x55 @ hweight8(r8)
214 and r2, r8, #0xaa 209 and r9, r8, #0xaa
215 add r6, r6, r2, lsr #1 210 add r6, r6, r9, lsr #1
216 and r2, r6, #0xcc 211 and r9, r6, #0xcc
217 and r6, r6, #0x33 212 and r6, r6, #0x33
218 add r6, r6, r2, lsr #2 213 add r6, r6, r9, lsr #2
219 add r6, r6, r6, lsr #4 214 add r6, r6, r6, lsr #4
220 and r5, r8, #7 << 8 215 and r9, r8, #7 << 8
221 ldr r7, [sp, r5, lsr #6] 216 ldr r7, [r2, r9, lsr #6]
222 and r6, r6, #15 @ number of regs to transfer 217 and r6, r6, #15 @ number of regs to transfer
223 sub r7, r7, r6, lsl #2 @ always decrement 218 sub r7, r7, r6, lsl #2 @ always decrement
224 str r7, [sp, r5, lsr #6] 219 str r7, [r2, r9, lsr #6]
225 mov pc, lr 220 b do_DataAbort
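
The ldm/stm and Thumb push/pop fixup paths above count the registers named in the transfer list so the base register can be rewound by 4*n. The masked-add sequence is a population count. A C model of the ARM-side computation, mirroring the 0x1111 mask and the nibble folds (like the assembler, a full 16-register list folds to 0 after the final and-with-15):

    /* Popcount of the low 16 bits of an ldm/stm encoding, as done above. */
    static unsigned int ldmstm_reg_count(unsigned int insn)
    {
            unsigned int n;

            n  =  insn & 0x1111;            /* bit 0 of each nibble      */
            n += (insn & 0x2222) >> 1;      /* + bit 1 of each nibble    */
            n += (insn & 0x4444) >> 2;      /* + bit 2 of each nibble    */
            n += (insn & 0x8888) >> 3;      /* nibble = its own popcount */
            n += n >> 8;                    /* fold high byte into low   */
            n += n >> 4;                    /* fold the two nibbles      */
            return n & 15;                  /* registers in the list     */
    }
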
diff --git a/arch/arm/mm/abort-macro.S b/arch/arm/mm/abort-macro.S
index d7cb1bfa51a4..52162d59407a 100644
--- a/arch/arm/mm/abort-macro.S
+++ b/arch/arm/mm/abort-macro.S
@@ -9,34 +9,32 @@
9 * 9 *
10 */ 10 */
11 11
12 .macro do_thumb_abort 12 .macro do_thumb_abort, fsr, pc, psr, tmp
13 tst r3, #PSR_T_BIT 13 tst \psr, #PSR_T_BIT
14 beq not_thumb 14 beq not_thumb
15 ldrh r3, [r2] @ Read aborted Thumb instruction 15 ldrh \tmp, [\pc] @ Read aborted Thumb instruction
16 and r3, r3, # 0xfe00 @ Mask opcode field 16 and \tmp, \tmp, # 0xfe00 @ Mask opcode field
17 cmp r3, # 0x5600 @ Is it ldrsb? 17 cmp \tmp, # 0x5600 @ Is it ldrsb?
18 orreq r3, r3, #1 << 11 @ Set L-bit if yes 18 orreq \tmp, \tmp, #1 << 11 @ Set L-bit if yes
19 tst r3, #1 << 11 @ L = 0 -> write 19 tst \tmp, #1 << 11 @ L = 0 -> write
20 orreq r1, r1, #1 << 11 @ yes. 20 orreq \fsr, \fsr, #1 << 11 @ yes.
21 mov pc, lr 21 b do_DataAbort
22not_thumb: 22not_thumb:
23 .endm 23 .endm
24 24
25/* 25/*
26 * We check for the following insturction encoding for LDRD. 26 * We check for the following instruction encoding for LDRD.
27 * 27 *
28 * [27:25] == 0 28 * [27:25] == 000
29 * [7:4] == 1101 29 * [7:4] == 1101
30 * [20] == 0 30 * [20] == 0
31 */ 31 */
32 .macro do_ldrd_abort 32 .macro do_ldrd_abort, tmp, insn
33 tst r3, #0x0e000000 @ [27:25] == 0 33 tst \insn, #0x0e100000 @ [27:25,20] == 0
34 bne not_ldrd 34 bne not_ldrd
35 and r2, r3, #0x000000f0 @ [7:4] == 1101 35 and \tmp, \insn, #0x000000f0 @ [7:4] == 1101
36 cmp r2, #0x000000d0 36 cmp \tmp, #0x000000d0
37 bne not_ldrd 37 beq do_DataAbort
38 tst r3, #1 << 20 @ [20] == 0
39 moveq pc, lr
40not_ldrd: 38not_ldrd:
41 .endm 39 .endm
42 40
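
The reworked do_ldrd_abort also folds the old separate bit-20 test into the first mask: 0x0e100000 checks [27:25] and [20] in one tst. Restated in C (the helper name is illustrative):

    /* LDRD encoding test from do_ldrd_abort: [27:25] == 000, [20] == 0,
     * [7:4] == 1101.  Matching instructions need no writeback fixup and
     * branch straight to do_DataAbort.
     */
    static int insn_is_ldrd(unsigned int insn)
    {
            return (insn & 0x0e100000) == 0 &&
                   (insn & 0x000000f0) == 0x000000d0;
    }
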
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
index 625e580945b5..119cb479c2ab 100644
--- a/arch/arm/mm/abort-nommu.S
+++ b/arch/arm/mm/abort-nommu.S
@@ -3,11 +3,11 @@
3/* 3/*
4 * Function: nommu_early_abort 4 * Function: nommu_early_abort
5 * 5 *
6 * Params : r2 = address of aborted instruction 6 * Params : r2 = pt_regs
7 * : r3 = saved SPSR 7 * : r4 = aborted context pc
8 * : r5 = aborted context psr
8 * 9 *
9 * Returns : r0 = 0 (abort address) 10 * Returns : r4 - r11, r13 preserved
10 * : r1 = 0 (FSR)
11 * 11 *
12 * Note: There is no FSR/FAR on !CPU_CP15_MMU cores. 12 * Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
13 * Just fill zero into the registers. 13 * Just fill zero into the registers.
@@ -16,5 +16,5 @@
16ENTRY(nommu_early_abort) 16ENTRY(nommu_early_abort)
17 mov r0, #0 @ clear r0, r1 (no FSR/FAR) 17 mov r0, #0 @ clear r0, r1 (no FSR/FAR)
18 mov r1, #0 18 mov r1, #0
19 mov pc, lr 19 b do_DataAbort
20ENDPROC(nommu_early_abort) 20ENDPROC(nommu_early_abort)
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 724ba3bce72c..be7c638b648b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -727,6 +727,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
727 int isize = 4; 727 int isize = 4;
728 int thumb2_32b = 0; 728 int thumb2_32b = 0;
729 729
730 if (interrupts_enabled(regs))
731 local_irq_enable();
732
730 instrptr = instruction_pointer(regs); 733 instrptr = instruction_pointer(regs);
731 734
732 fs = get_fs(); 735 fs = get_fs();
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index ef59099a5463..44c086710d2b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -120,17 +120,22 @@ static void l2x0_cache_sync(void)
120 spin_unlock_irqrestore(&l2x0_lock, flags); 120 spin_unlock_irqrestore(&l2x0_lock, flags);
121} 121}
122 122
123static void l2x0_flush_all(void) 123static void __l2x0_flush_all(void)
124{ 124{
125 unsigned long flags;
126
127 /* clean all ways */
128 spin_lock_irqsave(&l2x0_lock, flags);
129 debug_writel(0x03); 125 debug_writel(0x03);
130 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); 126 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
131 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); 127 cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
132 cache_sync(); 128 cache_sync();
133 debug_writel(0x00); 129 debug_writel(0x00);
130}
131
132static void l2x0_flush_all(void)
133{
134 unsigned long flags;
135
136 /* clean all ways */
137 spin_lock_irqsave(&l2x0_lock, flags);
138 __l2x0_flush_all();
134 spin_unlock_irqrestore(&l2x0_lock, flags); 139 spin_unlock_irqrestore(&l2x0_lock, flags);
135} 140}
136 141
@@ -266,7 +271,9 @@ static void l2x0_disable(void)
266 unsigned long flags; 271 unsigned long flags;
267 272
268 spin_lock_irqsave(&l2x0_lock, flags); 273 spin_lock_irqsave(&l2x0_lock, flags);
269 writel(0, l2x0_base + L2X0_CTRL); 274 __l2x0_flush_all();
275 writel_relaxed(0, l2x0_base + L2X0_CTRL);
276 dsb();
270 spin_unlock_irqrestore(&l2x0_lock, flags); 277 spin_unlock_irqrestore(&l2x0_lock, flags);
271} 278}
272 279
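
Net effect of the two hunks: the clean+invalidate sequence moves into __l2x0_flush_all(), which expects l2x0_lock to be held, so l2x0_disable() can flush and then clear the control register under a single lock acquisition. The resulting disable path, reassembled from the hunks above:

    static void l2x0_disable(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&l2x0_lock, flags);
            __l2x0_flush_all();             /* caller holds l2x0_lock */
            writel_relaxed(0, l2x0_base + L2X0_CTRL);
            dsb();                          /* drain before releasing */
            spin_unlock_irqrestore(&l2x0_lock, flags);
    }
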
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index bdba6c65c901..63cca0097130 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,7 +41,6 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
41 kfrom = kmap_atomic(from, KM_USER0); 41 kfrom = kmap_atomic(from, KM_USER0);
42 kto = kmap_atomic(to, KM_USER1); 42 kto = kmap_atomic(to, KM_USER1);
43 copy_page(kto, kfrom); 43 copy_page(kto, kfrom);
44 __cpuc_flush_dcache_area(kto, PAGE_SIZE);
45 kunmap_atomic(kto, KM_USER1); 44 kunmap_atomic(kto, KM_USER1);
46 kunmap_atomic(kfrom, KM_USER0); 45 kunmap_atomic(kfrom, KM_USER0);
47} 46}
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 82a093cee09a..0a0a1e7c20d2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -25,9 +25,11 @@
25#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
26#include <asm/sizes.h> 26#include <asm/sizes.h>
27 27
28#include "mm.h"
29
28static u64 get_coherent_dma_mask(struct device *dev) 30static u64 get_coherent_dma_mask(struct device *dev)
29{ 31{
30 u64 mask = ISA_DMA_THRESHOLD; 32 u64 mask = (u64)arm_dma_limit;
31 33
32 if (dev) { 34 if (dev) {
33 mask = dev->coherent_dma_mask; 35 mask = dev->coherent_dma_mask;
@@ -41,10 +43,10 @@ static u64 get_coherent_dma_mask(struct device *dev)
41 return 0; 43 return 0;
42 } 44 }
43 45
44 if ((~mask) & ISA_DMA_THRESHOLD) { 46 if ((~mask) & (u64)arm_dma_limit) {
45 dev_warn(dev, "coherent DMA mask %#llx is smaller " 47 dev_warn(dev, "coherent DMA mask %#llx is smaller "
46 "than system GFP_DMA mask %#llx\n", 48 "than system GFP_DMA mask %#llx\n",
47 mask, (unsigned long long)ISA_DMA_THRESHOLD); 49 mask, (u64)arm_dma_limit);
48 return 0; 50 return 0;
49 } 51 }
50 } 52 }
@@ -657,6 +659,33 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
657} 659}
658EXPORT_SYMBOL(dma_sync_sg_for_device); 660EXPORT_SYMBOL(dma_sync_sg_for_device);
659 661
662/*
663 * Return whether the given device DMA address mask can be supported
664 * properly. For example, if your device can only drive the low 24-bits
665 * during bus mastering, then you would pass 0x00ffffff as the mask
666 * to this function.
667 */
668int dma_supported(struct device *dev, u64 mask)
669{
670 if (mask < (u64)arm_dma_limit)
671 return 0;
672 return 1;
673}
674EXPORT_SYMBOL(dma_supported);
675
676int dma_set_mask(struct device *dev, u64 dma_mask)
677{
678 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
679 return -EIO;
680
681#ifndef CONFIG_DMABOUNCE
682 *dev->dma_mask = dma_mask;
683#endif
684
685 return 0;
686}
687EXPORT_SYMBOL(dma_set_mask);
688
660#define PREALLOC_DMA_DEBUG_ENTRIES 4096 689#define PREALLOC_DMA_DEBUG_ENTRIES 4096
661 690
662static int __init dma_debug_do_init(void) 691static int __init dma_debug_do_init(void)
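
With dma_supported()/dma_set_mask() now validating against arm_dma_limit instead of the old ISA_DMA_THRESHOLD, a driver requesting a mask the platform cannot satisfy is refused at probe time. A hypothetical probe fragment (device and mask width invented for illustration):

    static int example_probe(struct platform_device *pdev)
    {
            /* device drives only 24 address bits while bus-mastering */
            if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(24)))
                    return -EIO;    /* mask < arm_dma_limit: refused */
            return 0;
    }
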
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index bc0e1d88fd3b..55657c222d7c 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -94,7 +94,7 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
94 94
95 pud = pud_offset(pgd, addr); 95 pud = pud_offset(pgd, addr);
96 if (PTRS_PER_PUD != 1) 96 if (PTRS_PER_PUD != 1)
97 printk(", *pud=%08lx", pud_val(*pud)); 97 printk(", *pud=%08llx", (long long)pud_val(*pud));
98 98
99 if (pud_none(*pud)) 99 if (pud_none(*pud))
100 break; 100 break;
@@ -285,6 +285,10 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
285 tsk = current; 285 tsk = current;
286 mm = tsk->mm; 286 mm = tsk->mm;
287 287
288 /* Enable interrupts if they were enabled in the parent context. */
289 if (interrupts_enabled(regs))
290 local_irq_enable();
291
288 /* 292 /*
289 * If we're in an interrupt or have no user 293 * If we're in an interrupt or have no user
290 * context, we must not take the fault.. 294 * context, we must not take the fault..
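
This hunk and the alignment.c one above add the same pattern: abort entry now reaches C code with IRQs still masked, so the handlers re-enable them only when the faulting context ran with them enabled. For reference, the predicate is a one-line test of the saved CPSR (a sketch of the existing asm/ptrace.h macro):

    #define interrupts_enabled(regs) \
            (!((regs)->ARM_cpsr & PSR_I_BIT))
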
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c19571c40a21..e5ab4362322f 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,6 +212,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn,
212} 212}
213 213
214#ifdef CONFIG_ZONE_DMA 214#ifdef CONFIG_ZONE_DMA
215/*
216 * The DMA mask corresponding to the maximum bus address allocatable
217 * using GFP_DMA. The default here places no restriction on DMA
218 * allocations. This must be the smallest DMA mask in the system,
219 * so a successful GFP_DMA allocation will always satisfy this.
220 */
221u32 arm_dma_limit;
222
215static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 223static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
216 unsigned long dma_size) 224 unsigned long dma_size)
217{ 225{
@@ -278,6 +286,8 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
278 */ 286 */
279 arm_adjust_dma_zone(zone_size, zhole_size, 287 arm_adjust_dma_zone(zone_size, zhole_size,
280 ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); 288 ARM_DMA_ZONE_SIZE >> PAGE_SHIFT);
289
290 arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1;
281#endif 291#endif
282 292
283 free_area_init_node(0, zone_size, min, zhole_size); 293 free_area_init_node(0, zone_size, min, zhole_size);
@@ -422,6 +432,17 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
422 return pages; 432 return pages;
423} 433}
424 434
435/*
436 * Poison init memory with an undefined instruction (ARM) or a branch to an
437 * undefined instruction (Thumb).
438 */
439static inline void poison_init_mem(void *s, size_t count)
440{
441 u32 *p = (u32 *)s;
442 while ((count = count - 4))
443 *p++ = 0xe7fddef0;
444}
445
425static inline void 446static inline void
426free_memmap(unsigned long start_pfn, unsigned long end_pfn) 447free_memmap(unsigned long start_pfn, unsigned long end_pfn)
427{ 448{
@@ -639,8 +660,8 @@ void __init mem_init(void)
639 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" 660 " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
640#endif 661#endif
641 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 662 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
642 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
643 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 663 " .text : 0x%p" " - 0x%p" " (%4d kB)\n"
664 " .init : 0x%p" " - 0x%p" " (%4d kB)\n"
644 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 665 " .data : 0x%p" " - 0x%p" " (%4d kB)\n"
645 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", 666 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
646 667
@@ -662,8 +683,8 @@ void __init mem_init(void)
662#endif 683#endif
663 MLM(MODULES_VADDR, MODULES_END), 684 MLM(MODULES_VADDR, MODULES_END),
664 685
665 MLK_ROUNDUP(__init_begin, __init_end),
666 MLK_ROUNDUP(_text, _etext), 686 MLK_ROUNDUP(_text, _etext),
687 MLK_ROUNDUP(__init_begin, __init_end),
667 MLK_ROUNDUP(_sdata, _edata), 688 MLK_ROUNDUP(_sdata, _edata),
668 MLK_ROUNDUP(__bss_start, __bss_stop)); 689 MLK_ROUNDUP(__bss_start, __bss_stop));
669 690
@@ -704,11 +725,13 @@ void free_initmem(void)
704#ifdef CONFIG_HAVE_TCM 725#ifdef CONFIG_HAVE_TCM
705 extern char __tcm_start, __tcm_end; 726 extern char __tcm_start, __tcm_end;
706 727
728 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
707 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)), 729 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
708 __phys_to_pfn(__pa(&__tcm_end)), 730 __phys_to_pfn(__pa(&__tcm_end)),
709 "TCM link"); 731 "TCM link");
710#endif 732#endif
711 733
734 poison_init_mem(__init_begin, __init_end - __init_begin);
712 if (!machine_is_integrator() && !machine_is_cintegrator()) 735 if (!machine_is_integrator() && !machine_is_cintegrator())
713 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), 736 totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
714 __phys_to_pfn(__pa(__init_end)), 737 __phys_to_pfn(__pa(__init_end)),
@@ -721,10 +744,12 @@ static int keep_initrd;
721 744
722void free_initrd_mem(unsigned long start, unsigned long end) 745void free_initrd_mem(unsigned long start, unsigned long end)
723{ 746{
724 if (!keep_initrd) 747 if (!keep_initrd) {
748 poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
725 totalram_pages += free_area(__phys_to_pfn(__pa(start)), 749 totalram_pages += free_area(__phys_to_pfn(__pa(start)),
726 __phys_to_pfn(__pa(end)), 750 __phys_to_pfn(__pa(end)),
727 "initrd"); 751 "initrd");
752 }
728} 753}
729 754
730static int __init keepinitrd_setup(char *__unused) 755static int __init keepinitrd_setup(char *__unused)
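
arm_dma_limit is the highest physical address a GFP_DMA allocation may return. A worked example with assumed platform values (illustrative only, not from this patch):

    /* Assume PHYS_OFFSET = 0x80000000 and ARM_DMA_ZONE_SIZE = SZ_64M: */
    arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1;
    /*            = 0x80000000  + 0x04000000        - 1 = 0x83ffffff  */
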
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 5b3d7d543659..010566799c80 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -23,5 +23,11 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
23 23
24#endif 24#endif
25 25
26#ifdef CONFIG_ZONE_DMA
27extern u32 arm_dma_limit;
28#else
29#define arm_dma_limit ((u32)~0)
30#endif
31
26void __init bootmem_init(void); 32void __init bootmem_init(void);
27void arm_mm_memblock_reserve(void); 33void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9d9e736c2b4f..594d677b92c8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -759,7 +759,7 @@ early_param("vmalloc", early_vmalloc);
759 759
760static phys_addr_t lowmem_limit __initdata = 0; 760static phys_addr_t lowmem_limit __initdata = 0;
761 761
762static void __init sanity_check_meminfo(void) 762void __init sanity_check_meminfo(void)
763{ 763{
764 int i, j, highmem = 0; 764 int i, j, highmem = 0;
765 765
@@ -1032,8 +1032,9 @@ void __init paging_init(struct machine_desc *mdesc)
1032{ 1032{
1033 void *zero_page; 1033 void *zero_page;
1034 1034
1035 memblock_set_current_limit(lowmem_limit);
1036
1035 build_mem_type_table(); 1037 build_mem_type_table();
1036 sanity_check_meminfo();
1037 prepare_page_table(); 1038 prepare_page_table();
1038 map_lowmem(); 1039 map_lowmem();
1039 devicemaps_init(mdesc); 1040 devicemaps_init(mdesc);
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 687d02319a41..941a98c9e8aa 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,6 +27,10 @@ void __init arm_mm_memblock_reserve(void)
27 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE); 27 memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
28} 28}
29 29
30void __init sanity_check_meminfo(void)
31{
32}
33
30/* 34/*
31 * paging_init() sets up the page tables, initialises the zone memory 35 * paging_init() sets up the page tables, initialises the zone memory
32 * maps, and sets up the zero page, bad page and bad page tables. 36 * maps, and sets up the zero page, bad page and bad page tables.
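
The empty !MMU stub pairs with mmu.c dropping the static above: each configuration links exactly one sanity_check_meminfo(), so the caller in the common setup path needs no #ifdef. A simplified sketch of the assumed call site (setup_arch() ordering abridged):

    void __init setup_arch(char **cmdline_p)
    {
            /* ... */
            sanity_check_meminfo();   /* mmu.c version, or this no-op */
            arm_memblock_init(&meminfo, mdesc);
            paging_init(mdesc);
            /* ... */
    }
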
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S
index 87970eba88ea..8bbff025269a 100644
--- a/arch/arm/mm/pabort-legacy.S
+++ b/arch/arm/mm/pabort-legacy.S
@@ -4,16 +4,18 @@
4/* 4/*
5 * Function: legacy_pabort 5 * Function: legacy_pabort
6 * 6 *
7 * Params : r0 = address of aborted instruction 7 * Params : r2 = pt_regs
8 * : r4 = address of aborted instruction
9 * : r5 = psr for parent context
8 * 10 *
9 * Returns : r0 = address of abort 11 * Returns : r4 - r11, r13 preserved
10 * : r1 = Simulated IFSR with section translation fault status
11 * 12 *
12 * Purpose : obtain information about current prefetch abort. 13 * Purpose : obtain information about current prefetch abort.
13 */ 14 */
14 15
15 .align 5 16 .align 5
16ENTRY(legacy_pabort) 17ENTRY(legacy_pabort)
18 mov r0, r4
17 mov r1, #5 19 mov r1, #5
18 mov pc, lr 20 b do_PrefetchAbort
19ENDPROC(legacy_pabort) 21ENDPROC(legacy_pabort)
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S
index 06e3d1ef2115..9627646ce783 100644
--- a/arch/arm/mm/pabort-v6.S
+++ b/arch/arm/mm/pabort-v6.S
@@ -4,16 +4,18 @@
4/* 4/*
5 * Function: v6_pabort 5 * Function: v6_pabort
6 * 6 *
7 * Params : r0 = address of aborted instruction 7 * Params : r2 = pt_regs
8 * : r4 = address of aborted instruction
9 * : r5 = psr for parent context
8 * 10 *
9 * Returns : r0 = address of abort 11 * Returns : r4 - r11, r13 preserved
10 * : r1 = IFSR
11 * 12 *
12 * Purpose : obtain information about current prefetch abort. 13 * Purpose : obtain information about current prefetch abort.
13 */ 14 */
14 15
15 .align 5 16 .align 5
16ENTRY(v6_pabort) 17ENTRY(v6_pabort)
18 mov r0, r4
17 mrc p15, 0, r1, c5, c0, 1 @ get IFSR 19 mrc p15, 0, r1, c5, c0, 1 @ get IFSR
18 mov pc, lr 20 b do_PrefetchAbort
19ENDPROC(v6_pabort) 21ENDPROC(v6_pabort)
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S
index a8b3b300a18d..875761f44f3b 100644
--- a/arch/arm/mm/pabort-v7.S
+++ b/arch/arm/mm/pabort-v7.S
@@ -2,12 +2,13 @@
2#include <asm/assembler.h> 2#include <asm/assembler.h>
3 3
4/* 4/*
5 * Function: v6_pabort 5 * Function: v7_pabort
6 * 6 *
7 * Params : r0 = address of aborted instruction 7 * Params : r2 = pt_regs
8 * : r4 = address of aborted instruction
9 * : r5 = psr for parent context
8 * 10 *
9 * Returns : r0 = address of abort 11 * Returns : r4 - r11, r13 preserved
10 * : r1 = IFSR
11 * 12 *
12 * Purpose : obtain information about current prefetch abort. 13 * Purpose : obtain information about current prefetch abort.
13 */ 14 */
@@ -16,5 +17,5 @@
16ENTRY(v7_pabort) 17ENTRY(v7_pabort)
17 mrc p15, 0, r0, c6, c0, 2 @ get IFAR 18 mrc p15, 0, r0, c6, c0, 2 @ get IFAR
18 mrc p15, 0, r1, c5, c0, 1 @ get IFSR 19 mrc p15, 0, r1, c5, c0, 1 @ get IFSR
19 mov pc, lr 20 b do_PrefetchAbort
20ENDPROC(v7_pabort) 21ENDPROC(v7_pabort)
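
All three pabort flavours now converge the same way as the data abort helpers: r0 = faulting address, r1 = real or simulated IFSR, then a tail-call into the common C handler (prototype as in arch/arm/mm/fault.c):

    asmlinkage void do_PrefetchAbort(unsigned long addr, unsigned int ifsr,
                                     struct pt_regs *regs);
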
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S
index 5f79dc4ce3fb..50e3543d03bf 100644
--- a/arch/arm/mm/proc-arm6_7.S
+++ b/arch/arm/mm/proc-arm6_7.S
@@ -29,19 +29,19 @@ ENTRY(cpu_arm7_dcache_clean_area)
29/* 29/*
30 * Function: arm6_7_data_abort () 30 * Function: arm6_7_data_abort ()
31 * 31 *
32 * Params : r2 = address of aborted instruction 32 * Params : r2 = pt_regs
33 * : sp = pointer to registers 33 * : r4 = aborted context pc
34 * : r5 = aborted context psr
34 * 35 *
35 * Purpose : obtain information about current aborted instruction 36 * Purpose : obtain information about current aborted instruction
36 * 37 *
37 * Returns : r0 = address of abort 38 * Returns : r4-r5, r10-r11, r13 preserved
38 * : r1 = FSR
39 */ 39 */
40 40
41ENTRY(cpu_arm7_data_abort) 41ENTRY(cpu_arm7_data_abort)
42 mrc p15, 0, r1, c5, c0, 0 @ get FSR 42 mrc p15, 0, r1, c5, c0, 0 @ get FSR
43 mrc p15, 0, r0, c6, c0, 0 @ get FAR 43 mrc p15, 0, r0, c6, c0, 0 @ get FAR
44 ldr r8, [r2] @ read arm instruction 44 ldr r8, [r4] @ read arm instruction
45 tst r8, #1 << 20 @ L = 0 -> write? 45 tst r8, #1 << 20 @ L = 0 -> write?
46 orreq r1, r1, #1 << 11 @ yes. 46 orreq r1, r1, #1 << 11 @ yes.
47 and r7, r8, #15 << 24 47 and r7, r8, #15 << 24
@@ -49,7 +49,7 @@ ENTRY(cpu_arm7_data_abort)
49 nop 49 nop
50 50
51/* 0 */ b .data_unknown 51/* 0 */ b .data_unknown
52/* 1 */ mov pc, lr @ swp 52/* 1 */ b do_DataAbort @ swp
53/* 2 */ b .data_unknown 53/* 2 */ b .data_unknown
54/* 3 */ b .data_unknown 54/* 3 */ b .data_unknown
55/* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m 55/* 4 */ b .data_arm_lateldrpostconst @ ldr rd, [rn], #m
@@ -60,87 +60,85 @@ ENTRY(cpu_arm7_data_abort)
60/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist> 60/* 9 */ b .data_arm_ldmstm @ ldm*b rn, <rlist>
61/* a */ b .data_unknown 61/* a */ b .data_unknown
62/* b */ b .data_unknown 62/* b */ b .data_unknown
63/* c */ mov pc, lr @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m 63/* c */ b do_DataAbort @ ldc rd, [rn], #m @ Same as ldr rd, [rn], #m
64/* d */ mov pc, lr @ ldc rd, [rn, #m] 64/* d */ b do_DataAbort @ ldc rd, [rn, #m]
65/* e */ b .data_unknown 65/* e */ b .data_unknown
66/* f */ 66/* f */
67.data_unknown: @ Part of jumptable 67.data_unknown: @ Part of jumptable
68 mov r0, r2 68 mov r0, r4
69 mov r1, r8 69 mov r1, r8
70 mov r2, sp 70 b baddataabort
71 bl baddataabort
72 b ret_from_exception
73 71
74ENTRY(cpu_arm6_data_abort) 72ENTRY(cpu_arm6_data_abort)
75 mrc p15, 0, r1, c5, c0, 0 @ get FSR 73 mrc p15, 0, r1, c5, c0, 0 @ get FSR
76 mrc p15, 0, r0, c6, c0, 0 @ get FAR 74 mrc p15, 0, r0, c6, c0, 0 @ get FAR
77 ldr r8, [r2] @ read arm instruction 75 ldr r8, [r4] @ read arm instruction
78 tst r8, #1 << 20 @ L = 0 -> write? 76 tst r8, #1 << 20 @ L = 0 -> write?
79 orreq r1, r1, #1 << 11 @ yes. 77 orreq r1, r1, #1 << 11 @ yes.
80 and r7, r8, #14 << 24 78 and r7, r8, #14 << 24
81 teq r7, #8 << 24 @ was it ldm/stm 79 teq r7, #8 << 24 @ was it ldm/stm
82 movne pc, lr 80 bne do_DataAbort
83 81
84.data_arm_ldmstm: 82.data_arm_ldmstm:
85 tst r8, #1 << 21 @ check writeback bit 83 tst r8, #1 << 21 @ check writeback bit
86 moveq pc, lr @ no writeback -> no fixup 84 beq do_DataAbort @ no writeback -> no fixup
87 mov r7, #0x11 85 mov r7, #0x11
88 orr r7, r7, #0x1100 86 orr r7, r7, #0x1100
89 and r6, r8, r7 87 and r6, r8, r7
90 and r2, r8, r7, lsl #1 88 and r9, r8, r7, lsl #1
91 add r6, r6, r2, lsr #1 89 add r6, r6, r9, lsr #1
92 and r2, r8, r7, lsl #2 90 and r9, r8, r7, lsl #2
93 add r6, r6, r2, lsr #2 91 add r6, r6, r9, lsr #2
94 and r2, r8, r7, lsl #3 92 and r9, r8, r7, lsl #3
95 add r6, r6, r2, lsr #3 93 add r6, r6, r9, lsr #3
96 add r6, r6, r6, lsr #8 94 add r6, r6, r6, lsr #8
97 add r6, r6, r6, lsr #4 95 add r6, r6, r6, lsr #4
98 and r6, r6, #15 @ r6 = no. of registers to transfer. 96 and r6, r6, #15 @ r6 = no. of registers to transfer.
99 and r5, r8, #15 << 16 @ Extract 'n' from instruction 97 and r9, r8, #15 << 16 @ Extract 'n' from instruction
100 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 98 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
101 tst r8, #1 << 23 @ Check U bit 99 tst r8, #1 << 23 @ Check U bit
102 subne r7, r7, r6, lsl #2 @ Undo increment 100 subne r7, r7, r6, lsl #2 @ Undo increment
103 addeq r7, r7, r6, lsl #2 @ Undo decrement 101 addeq r7, r7, r6, lsl #2 @ Undo decrement
104 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 102 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
105 mov pc, lr 103 b do_DataAbort
106 104
107.data_arm_apply_r6_and_rn: 105.data_arm_apply_r6_and_rn:
108 and r5, r8, #15 << 16 @ Extract 'n' from instruction 106 and r9, r8, #15 << 16 @ Extract 'n' from instruction
109 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 107 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
110 tst r8, #1 << 23 @ Check U bit 108 tst r8, #1 << 23 @ Check U bit
111 subne r7, r7, r6 @ Undo increment 109 subne r7, r7, r6 @ Undo increment
112 addeq r7, r7, r6 @ Undo decrement 110 addeq r7, r7, r6 @ Undo decrement
113 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 111 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
114 mov pc, lr 112 b do_DataAbort
115 113
116.data_arm_lateldrpreconst: 114.data_arm_lateldrpreconst:
117 tst r8, #1 << 21 @ check writeback bit 115 tst r8, #1 << 21 @ check writeback bit
118 moveq pc, lr @ no writeback -> no fixup 116 beq do_DataAbort @ no writeback -> no fixup
119.data_arm_lateldrpostconst: 117.data_arm_lateldrpostconst:
120 movs r2, r8, lsl #20 @ Get offset 118 movs r6, r8, lsl #20 @ Get offset
121 moveq pc, lr @ zero -> no fixup 119 beq do_DataAbort @ zero -> no fixup
122 and r5, r8, #15 << 16 @ Extract 'n' from instruction 120 and r9, r8, #15 << 16 @ Extract 'n' from instruction
123 ldr r7, [sp, r5, lsr #14] @ Get register 'Rn' 121 ldr r7, [r2, r9, lsr #14] @ Get register 'Rn'
124 tst r8, #1 << 23 @ Check U bit 122 tst r8, #1 << 23 @ Check U bit
125 subne r7, r7, r2, lsr #20 @ Undo increment 123 subne r7, r7, r6, lsr #20 @ Undo increment
126 addeq r7, r7, r2, lsr #20 @ Undo decrement 124 addeq r7, r7, r6, lsr #20 @ Undo decrement
127 str r7, [sp, r5, lsr #14] @ Put register 'Rn' 125 str r7, [r2, r9, lsr #14] @ Put register 'Rn'
128 mov pc, lr 126 b do_DataAbort
129 127
130.data_arm_lateldrprereg: 128.data_arm_lateldrprereg:
131 tst r8, #1 << 21 @ check writeback bit 129 tst r8, #1 << 21 @ check writeback bit
132 moveq pc, lr @ no writeback -> no fixup 130 beq do_DataAbort @ no writeback -> no fixup
133.data_arm_lateldrpostreg: 131.data_arm_lateldrpostreg:
134 and r7, r8, #15 @ Extract 'm' from instruction 132 and r7, r8, #15 @ Extract 'm' from instruction
135 ldr r6, [sp, r7, lsl #2] @ Get register 'Rm' 133 ldr r6, [r2, r7, lsl #2] @ Get register 'Rm'
136 mov r5, r8, lsr #7 @ get shift count 134 mov r9, r8, lsr #7 @ get shift count
137 ands r5, r5, #31 135 ands r9, r9, #31
138 and r7, r8, #0x70 @ get shift type 136 and r7, r8, #0x70 @ get shift type
139 orreq r7, r7, #8 @ shift count = 0 137 orreq r7, r7, #8 @ shift count = 0
140 add pc, pc, r7 138 add pc, pc, r7
141 nop 139 nop
142 140
143 mov r6, r6, lsl r5 @ 0: LSL #!0 141 mov r6, r6, lsl r9 @ 0: LSL #!0
144 b .data_arm_apply_r6_and_rn 142 b .data_arm_apply_r6_and_rn
145 b .data_arm_apply_r6_and_rn @ 1: LSL #0 143 b .data_arm_apply_r6_and_rn @ 1: LSL #0
146 nop 144 nop
@@ -148,7 +146,7 @@ ENTRY(cpu_arm6_data_abort)
148 nop 146 nop
149 b .data_unknown @ 3: MUL? 147 b .data_unknown @ 3: MUL?
150 nop 148 nop
151 mov r6, r6, lsr r5 @ 4: LSR #!0 149 mov r6, r6, lsr r9 @ 4: LSR #!0
152 b .data_arm_apply_r6_and_rn 150 b .data_arm_apply_r6_and_rn
153 mov r6, r6, lsr #32 @ 5: LSR #32 151 mov r6, r6, lsr #32 @ 5: LSR #32
154 b .data_arm_apply_r6_and_rn 152 b .data_arm_apply_r6_and_rn
@@ -156,7 +154,7 @@ ENTRY(cpu_arm6_data_abort)
156 nop 154 nop
157 b .data_unknown @ 7: MUL? 155 b .data_unknown @ 7: MUL?
158 nop 156 nop
159 mov r6, r6, asr r5 @ 8: ASR #!0 157 mov r6, r6, asr r9 @ 8: ASR #!0
160 b .data_arm_apply_r6_and_rn 158 b .data_arm_apply_r6_and_rn
161 mov r6, r6, asr #32 @ 9: ASR #32 159 mov r6, r6, asr #32 @ 9: ASR #32
162 b .data_arm_apply_r6_and_rn 160 b .data_arm_apply_r6_and_rn
@@ -164,7 +162,7 @@ ENTRY(cpu_arm6_data_abort)
164 nop 162 nop
165 b .data_unknown @ B: MUL? 163 b .data_unknown @ B: MUL?
166 nop 164 nop
167 mov r6, r6, ror r5 @ C: ROR #!0 165 mov r6, r6, ror r9 @ C: ROR #!0
168 b .data_arm_apply_r6_and_rn 166 b .data_arm_apply_r6_and_rn
169 mov r6, r6, rrx @ D: RRX 167 mov r6, r6, rrx @ D: RRX
170 b .data_arm_apply_r6_and_rn 168 b .data_arm_apply_r6_and_rn
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S
index 184a9c997e36..e9c47271732d 100644
--- a/arch/arm/mm/proc-sa1100.S
+++ b/arch/arm/mm/proc-sa1100.S
@@ -34,7 +34,7 @@
34 */ 34 */
35#define DCACHELINESIZE 32 35#define DCACHELINESIZE 32
36 36
37 __INIT 37 .section .text
38 38
39/* 39/*
40 * cpu_sa1100_proc_init() 40 * cpu_sa1100_proc_init()
@@ -45,8 +45,6 @@ ENTRY(cpu_sa1100_proc_init)
45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland 45 mcr p15, 0, r0, c9, c0, 5 @ Allow read-buffer operations from userland
46 mov pc, lr 46 mov pc, lr
47 47
48 .section .text
49
50/* 48/*
51 * cpu_sa1100_proc_fin() 49 * cpu_sa1100_proc_fin()
52 * 50 *
diff --git a/arch/arm/mm/tlb-fa.S b/arch/arm/mm/tlb-fa.S
index 9694f1f6f485..d887a31faaae 100644
--- a/arch/arm/mm/tlb-fa.S
+++ b/arch/arm/mm/tlb-fa.S
@@ -46,7 +46,6 @@ ENTRY(fa_flush_user_tlb_range)
46 add r0, r0, #PAGE_SZ 46 add r0, r0, #PAGE_SZ
47 cmp r0, r1 47 cmp r0, r1
48 blo 1b 48 blo 1b
49 mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB
50 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 49 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
51 mov pc, lr 50 mov pc, lr
52 51
@@ -60,9 +59,8 @@ ENTRY(fa_flush_kern_tlb_range)
60 add r0, r0, #PAGE_SZ 59 add r0, r0, #PAGE_SZ
61 cmp r0, r1 60 cmp r0, r1
62 blo 1b 61 blo 1b
63 mcr p15, 0, r3, c7, c5, 6 @ invalidate BTB
64 mcr p15, 0, r3, c7, c10, 4 @ data write barrier 62 mcr p15, 0, r3, c7, c10, 4 @ data write barrier
65 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush 63 mcr p15, 0, r3, c7, c5, 4 @ prefetch flush (isb)
66 mov pc, lr 64 mov pc, lr
67 65
68 __INITDATA 66 __INITDATA
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 73d7d89b04c4..ffe06a69a6e5 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -54,7 +54,6 @@ ENTRY(v6wbi_flush_user_tlb_range)
54 add r0, r0, #PAGE_SZ 54 add r0, r0, #PAGE_SZ
55 cmp r0, r1 55 cmp r0, r1
56 blo 1b 56 blo 1b
57 mcr p15, 0, ip, c7, c5, 6 @ flush BTAC/BTB
58 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier 57 mcr p15, 0, ip, c7, c10, 4 @ data synchronization barrier
59 mov pc, lr 58 mov pc, lr
60 59
@@ -83,9 +82,8 @@ ENTRY(v6wbi_flush_kern_tlb_range)
83 add r0, r0, #PAGE_SZ 82 add r0, r0, #PAGE_SZ
84 cmp r0, r1 83 cmp r0, r1
85 blo 1b 84 blo 1b
86 mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB
87 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier 85 mcr p15, 0, r2, c7, c10, 4 @ data synchronization barrier
88 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush 86 mcr p15, 0, r2, c7, c5, 4 @ prefetch flush (isb)
89 mov pc, lr 87 mov pc, lr
90 88
91 __INIT 89 __INIT
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 53cd5b454673..86bb71664508 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -48,9 +48,6 @@ ENTRY(v7wbi_flush_user_tlb_range)
48 add r0, r0, #PAGE_SZ 48 add r0, r0, #PAGE_SZ
49 cmp r0, r1 49 cmp r0, r1
50 blo 1b 50 blo 1b
51 mov ip, #0
52 ALT_SMP(mcr p15, 0, ip, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
53 ALT_UP(mcr p15, 0, ip, c7, c5, 6) @ flush BTAC/BTB
54 dsb 51 dsb
55 mov pc, lr 52 mov pc, lr
56ENDPROC(v7wbi_flush_user_tlb_range) 53ENDPROC(v7wbi_flush_user_tlb_range)
@@ -75,9 +72,6 @@ ENTRY(v7wbi_flush_kern_tlb_range)
75 add r0, r0, #PAGE_SZ 72 add r0, r0, #PAGE_SZ
76 cmp r0, r1 73 cmp r0, r1
77 blo 1b 74 blo 1b
78 mov r2, #0
79 ALT_SMP(mcr p15, 0, r2, c7, c1, 6) @ flush BTAC/BTB Inner Shareable
80 ALT_UP(mcr p15, 0, r2, c7, c5, 6) @ flush BTAC/BTB
81 dsb 75 dsb
82 isb 76 isb
83 mov pc, lr 77 mov pc, lr
diff --git a/arch/arm/plat-mxc/include/mach/entry-macro.S b/arch/arm/plat-mxc/include/mach/entry-macro.S
index 2e49e71b1b98..066d464d322d 100644
--- a/arch/arm/plat-mxc/include/mach/entry-macro.S
+++ b/arch/arm/plat-mxc/include/mach/entry-macro.S
@@ -78,7 +78,3 @@
78 movs \irqnr, \irqnr 78 movs \irqnr, \irqnr
79#endif 79#endif
80 .endm 80 .endm
81
82 @ irq priority table (not used)
83 .macro irq_prio_table
84 .endm
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 6af3d0b1f8d0..363c91e44efb 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -394,20 +394,15 @@ void omap3_sram_restore_context(void)
394} 394}
395#endif /* CONFIG_PM */ 395#endif /* CONFIG_PM */
396 396
397static int __init omap34xx_sram_init(void) 397#endif /* CONFIG_ARCH_OMAP3 */
398{ 398
399 _omap3_sram_configure_core_dpll =
400 omap_sram_push(omap3_sram_configure_core_dpll,
401 omap3_sram_configure_core_dpll_sz);
402 omap_push_sram_idle();
403 return 0;
404}
405#else
406static inline int omap34xx_sram_init(void) 399static inline int omap34xx_sram_init(void)
407{ 400{
401#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_PM)
402 omap3_sram_restore_context();
403#endif
408 return 0; 404 return 0;
409} 405}
410#endif
411 406
412int __init omap_sram_init(void) 407int __init omap_sram_init(void)
413{ 408{
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index 5b4fffab1eb4..41ab97ebe4cf 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -432,7 +432,7 @@ void __init orion_gpio_init(int gpio_base, int ngpio,
432 ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF; 432 ct->regs.mask = ochip->mask_offset + GPIO_EDGE_MASK_OFF;
433 ct->regs.ack = GPIO_EDGE_CAUSE_OFF; 433 ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
434 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 434 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
435 ct->chip.irq_ack = irq_gc_ack; 435 ct->chip.irq_ack = irq_gc_ack_clr_bit;
436 ct->chip.irq_mask = irq_gc_mask_clr_bit; 436 ct->chip.irq_mask = irq_gc_mask_clr_bit;
437 ct->chip.irq_unmask = irq_gc_mask_set_bit; 437 ct->chip.irq_unmask = irq_gc_mask_set_bit;
438 ct->chip.irq_set_type = gpio_irq_set_type; 438 ct->chip.irq_set_type = gpio_irq_set_type;
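
irq_gc_ack() existed in only one flavour; it is split because controllers ack differently. Orion's edge-cause register is write-to-clear, while the s5p gpio-int hunk further down picks the set-bit variant. The distinction, sketched (not the exact kernel bodies):

    /* irq_gc_ack_set_bit(d): write the irq's mask bit to the ack register
     * irq_gc_ack_clr_bit(d): write the complement (clears the cause bit)
     */
    ct->chip.irq_ack = irq_gc_ack_clr_bit;  /* orion: write-0-to-clear */
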
diff --git a/arch/arm/plat-pxa/gpio.c b/arch/arm/plat-pxa/gpio.c
index 48ebb9479b61..a11dc3670505 100644
--- a/arch/arm/plat-pxa/gpio.c
+++ b/arch/arm/plat-pxa/gpio.c
@@ -50,7 +50,7 @@ static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
50 return container_of(c, struct pxa_gpio_chip, chip)->regbase; 50 return container_of(c, struct pxa_gpio_chip, chip)->regbase;
51} 51}
52 52
53static inline struct pxa_gpio_chip *gpio_to_chip(unsigned gpio) 53static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
54{ 54{
55 return &pxa_gpio_chips[gpio_to_bank(gpio)]; 55 return &pxa_gpio_chips[gpio_to_bank(gpio)];
56} 56}
@@ -161,7 +161,7 @@ static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
161 int gpio = irq_to_gpio(d->irq); 161 int gpio = irq_to_gpio(d->irq);
162 unsigned long gpdr, mask = GPIO_bit(gpio); 162 unsigned long gpdr, mask = GPIO_bit(gpio);
163 163
164 c = gpio_to_chip(gpio); 164 c = gpio_to_pxachip(gpio);
165 165
166 if (type == IRQ_TYPE_PROBE) { 166 if (type == IRQ_TYPE_PROBE) {
167 /* Don't mess with enabled GPIOs using preconfigured edges or 167 /* Don't mess with enabled GPIOs using preconfigured edges or
@@ -230,7 +230,7 @@ static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
230static void pxa_ack_muxed_gpio(struct irq_data *d) 230static void pxa_ack_muxed_gpio(struct irq_data *d)
231{ 231{
232 int gpio = irq_to_gpio(d->irq); 232 int gpio = irq_to_gpio(d->irq);
233 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 233 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
234 234
235 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET); 235 __raw_writel(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
236} 236}
@@ -238,7 +238,7 @@ static void pxa_ack_muxed_gpio(struct irq_data *d)
238static void pxa_mask_muxed_gpio(struct irq_data *d) 238static void pxa_mask_muxed_gpio(struct irq_data *d)
239{ 239{
240 int gpio = irq_to_gpio(d->irq); 240 int gpio = irq_to_gpio(d->irq);
241 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 241 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
242 uint32_t grer, gfer; 242 uint32_t grer, gfer;
243 243
244 c->irq_mask &= ~GPIO_bit(gpio); 244 c->irq_mask &= ~GPIO_bit(gpio);
@@ -252,7 +252,7 @@ static void pxa_mask_muxed_gpio(struct irq_data *d)
252static void pxa_unmask_muxed_gpio(struct irq_data *d) 252static void pxa_unmask_muxed_gpio(struct irq_data *d)
253{ 253{
254 int gpio = irq_to_gpio(d->irq); 254 int gpio = irq_to_gpio(d->irq);
255 struct pxa_gpio_chip *c = gpio_to_chip(gpio); 255 struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
256 256
257 c->irq_mask |= GPIO_bit(gpio); 257 c->irq_mask |= GPIO_bit(gpio);
258 update_edge_detect(c); 258 update_edge_detect(c);
diff --git a/arch/arm/plat-s3c24xx/dma.c b/arch/arm/plat-s3c24xx/dma.c
index 2abf9660bc6c..539bd0e3defd 100644
--- a/arch/arm/plat-s3c24xx/dma.c
+++ b/arch/arm/plat-s3c24xx/dma.c
@@ -712,7 +712,7 @@ static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
712 * get control of a dma channel 712 * get control of a dma channel
713*/ 713*/
714 714
715int s3c2410_dma_request(unsigned int channel, 715int s3c2410_dma_request(enum dma_ch channel,
716 struct s3c2410_dma_client *client, 716 struct s3c2410_dma_client *client,
717 void *dev) 717 void *dev)
718{ 718{
@@ -783,7 +783,7 @@ EXPORT_SYMBOL(s3c2410_dma_request);
783 * allowed to go through. 783 * allowed to go through.
784*/ 784*/
785 785
786int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client) 786int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *client)
787{ 787{
788 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 788 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
789 unsigned long flags; 789 unsigned long flags;
@@ -974,7 +974,7 @@ static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
974} 974}
975 975
976int 976int
977s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op) 977s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op)
978{ 978{
979 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 979 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
980 980
@@ -1021,23 +1021,19 @@ EXPORT_SYMBOL(s3c2410_dma_ctrl);
1021 * xfersize: size of unit in bytes (1,2,4) 1021 * xfersize: size of unit in bytes (1,2,4)
1022*/ 1022*/
1023 1023
1024int s3c2410_dma_config(unsigned int channel, 1024int s3c2410_dma_config(enum dma_ch channel,
1025 int xferunit) 1025 int xferunit)
1026{ 1026{
1027 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 1027 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
1028 unsigned int dcon; 1028 unsigned int dcon;
1029 1029
1030 pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n", 1030 pr_debug("%s: chan=%d, xfer_unit=%d\n", __func__, channel, xferunit);
1031 __func__, channel, xferunit, dcon);
1032 1031
1033 if (chan == NULL) 1032 if (chan == NULL)
1034 return -EINVAL; 1033 return -EINVAL;
1035 1034
1036 pr_debug("%s: Initial dcon is %08x\n", __func__, dcon);
1037
1038 dcon = chan->dcon & dma_sel.dcon_mask; 1035 dcon = chan->dcon & dma_sel.dcon_mask;
1039 1036 pr_debug("%s: dcon is %08x\n", __func__, dcon);
1040 pr_debug("%s: New dcon is %08x\n", __func__, dcon);
1041 1037
1042 switch (chan->req_ch) { 1038 switch (chan->req_ch) {
1043 case DMACH_I2S_IN: 1039 case DMACH_I2S_IN:
@@ -1104,7 +1100,7 @@ EXPORT_SYMBOL(s3c2410_dma_config);
1104 * devaddr: physical address of the source 1100 * devaddr: physical address of the source
1105*/ 1101*/
1106 1102
1107int s3c2410_dma_devconfig(unsigned int channel, 1103int s3c2410_dma_devconfig(enum dma_ch channel,
1108 enum s3c2410_dmasrc source, 1104 enum s3c2410_dmasrc source,
1109 unsigned long devaddr) 1105 unsigned long devaddr)
1110{ 1106{
@@ -1177,7 +1173,7 @@ EXPORT_SYMBOL(s3c2410_dma_devconfig);
1177 * returns the current transfer points for the dma source and destination 1173 * returns the current transfer points for the dma source and destination
1178*/ 1174*/
1179 1175
1180int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst) 1176int s3c2410_dma_getposition(enum dma_ch channel, dma_addr_t *src, dma_addr_t *dst)
1181{ 1177{
1182 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 1178 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
1183 1179
@@ -1235,7 +1231,7 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1235 /* restore channel's hardware configuration */ 1231 /* restore channel's hardware configuration */
1236 1232
1237 if (!cp->in_use) 1233 if (!cp->in_use)
1238 return 0; 1234 return;
1239 1235
1240 printk(KERN_INFO "dma%d: restoring configuration\n", cp->number); 1236 printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);
1241 1237
@@ -1246,8 +1242,6 @@ static void s3c2410_dma_resume_chan(struct s3c2410_dma_chan *cp)
1246 1242
1247 if (cp->map != NULL) 1243 if (cp->map != NULL)
1248 dma_sel.select(cp, cp->map); 1244 dma_sel.select(cp, cp->map);
1249
1250 return 0;
1251} 1245}
1252 1246
1253static void s3c2410_dma_resume(void) 1247static void s3c2410_dma_resume(void)
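
The channel parameter across the s3c2410 DMA API (here and in the plat-samsung hunks below) changes from unsigned int to the dedicated enum dma_ch, letting the compiler flag bogus channel ids and keeping the definitions in sync with their prototypes. A hypothetical caller, unchanged by the retyping since channel ids were already enumerators:

    static struct s3c2410_dma_client demo_client = {
            .name = "demo-driver",          /* hypothetical client */
    };

    /* DMACH_SDI is one of the enum dma_ch channel ids */
    if (s3c2410_dma_request(DMACH_SDI, &demo_client, NULL) < 0)
            pr_err("could not claim SDI DMA channel\n");
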
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/plat-s3c24xx/sleep.S
index fd7032f84ae7..c56612569b40 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/plat-s3c24xx/sleep.S
@@ -41,31 +41,6 @@
41 41
42 .text 42 .text
43 43
44 /* s3c_cpu_save
45 *
46 * entry:
47 * r1 = v:p offset
48 */
49
50ENTRY(s3c_cpu_save)
51 stmfd sp!, { r4 - r12, lr }
52 ldr r3, =resume_with_mmu
53 bl cpu_suspend
54
55 @@ jump to final code to send system to sleep
56 ldr r0, =pm_cpu_sleep
57 @@ldr pc, [ r0 ]
58 ldr r0, [ r0 ]
59 mov pc, r0
60
61 @@ return to the caller, after having the MMU
62 @@ turned on, this restores the last bits from the
63 @@ stack
64resume_with_mmu:
65 ldmfd sp!, { r4 - r12, pc }
66
67 .ltorg
68
69 /* sleep magic, to allow the bootloader to check for a valid 44 /* sleep magic, to allow the bootloader to check for a valid
70 * image to resume to. Must be the first word before the 45 * image to resume to. Must be the first word before the
71 * s3c_cpu_resume entry. 46 * s3c_cpu_resume entry.
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index 135abda31c9a..327ab9f662e8 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -152,7 +152,7 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip)
152 if (!gc) 152 if (!gc)
153 return -ENOMEM; 153 return -ENOMEM;
154 ct = gc->chip_types; 154 ct = gc->chip_types;
155 ct->chip.irq_ack = irq_gc_ack; 155 ct->chip.irq_ack = irq_gc_ack_set_bit;
156 ct->chip.irq_mask = irq_gc_mask_set_bit; 156 ct->chip.irq_mask = irq_gc_mask_set_bit;
157 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 157 ct->chip.irq_unmask = irq_gc_mask_clr_bit;
158 ct->chip.irq_set_type = s5p_gpioint_set_type, 158 ct->chip.irq_set_type = s5p_gpioint_set_type,
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-s5p/s5p-time.c
index 899a8cc011ff..612934c48b0d 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-s5p/s5p-time.c
@@ -370,11 +370,11 @@ static void __init s5p_clocksource_init(void)
370 370
371 clock_rate = clk_get_rate(tin_source); 371 clock_rate = clk_get_rate(tin_source);
372 372
373 init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
374
375 s5p_time_setup(timer_source.source_id, TCNT_MAX); 373 s5p_time_setup(timer_source.source_id, TCNT_MAX);
376 s5p_time_start(timer_source.source_id, PERIODIC); 374 s5p_time_start(timer_source.source_id, PERIODIC);
377 375
376 init_sched_clock(&cd, s5p_update_sched_clock, 32, clock_rate);
377
378 if (clocksource_register_hz(&time_clocksource, clock_rate)) 378 if (clocksource_register_hz(&time_clocksource, clock_rate))
379 panic("%s: can't register clocksource\n", time_clocksource.name); 379 panic("%s: can't register clocksource\n", time_clocksource.name);
380} 380}
diff --git a/arch/arm/plat-samsung/dma.c b/arch/arm/plat-samsung/dma.c
index cb459dd95459..6143aa147688 100644
--- a/arch/arm/plat-samsung/dma.c
+++ b/arch/arm/plat-samsung/dma.c
@@ -41,7 +41,7 @@ struct s3c2410_dma_chan *s3c_dma_lookup_channel(unsigned int channel)
41 * irq? 41 * irq?
42*/ 42*/
43 43
44int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn) 44int s3c2410_dma_set_opfn(enum dma_ch channel, s3c2410_dma_opfn_t rtn)
45{ 45{
46 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 46 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
47 47
@@ -56,7 +56,7 @@ int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
56} 56}
57EXPORT_SYMBOL(s3c2410_dma_set_opfn); 57EXPORT_SYMBOL(s3c2410_dma_set_opfn);
58 58
59int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn) 59int s3c2410_dma_set_buffdone_fn(enum dma_ch channel, s3c2410_dma_cbfn_t rtn)
60{ 60{
61 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 61 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
62 62
@@ -71,7 +71,7 @@ int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
71} 71}
72EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn); 72EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
73 73
74int s3c2410_dma_setflags(unsigned int channel, unsigned int flags) 74int s3c2410_dma_setflags(enum dma_ch channel, unsigned int flags)
75{ 75{
76 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel); 76 struct s3c2410_dma_chan *chan = s3c_dma_lookup_channel(channel);
77 77
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 4af108ff4112..e3b31c26ac3e 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -12,6 +12,10 @@
12 * it under the terms of the GNU General Public License version 2 as 12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14*/ 14*/
15
16#ifndef __PLAT_DEVS_H
17#define __PLAT_DEVS_H __FILE__
18
15#include <linux/platform_device.h> 19#include <linux/platform_device.h>
16 20
17struct s3c24xx_uart_resources { 21struct s3c24xx_uart_resources {
@@ -159,3 +163,5 @@ extern struct platform_device s3c_device_ac97;
159 */ 163 */
160extern void *s3c_set_platdata(void *pd, size_t pdsize, 164extern void *s3c_set_platdata(void *pd, size_t pdsize,
161 struct platform_device *pdev); 165 struct platform_device *pdev);
166
167#endif /* __PLAT_DEVS_H */
diff --git a/arch/arm/plat-samsung/include/plat/dma.h b/arch/arm/plat-samsung/include/plat/dma.h
index 2e8f8c6560d7..8c273b7a6f56 100644
--- a/arch/arm/plat-samsung/include/plat/dma.h
+++ b/arch/arm/plat-samsung/include/plat/dma.h
@@ -42,6 +42,7 @@ struct s3c2410_dma_client {
 };
 
 struct s3c2410_dma_chan;
+enum dma_ch;
 
 /* s3c2410_dma_cbfn_t
  *
@@ -62,7 +63,7 @@ typedef int (*s3c2410_dma_opfn_t)(struct s3c2410_dma_chan *,
  * request a dma channel exclusivley
 */
 
-extern int s3c2410_dma_request(unsigned int channel,
+extern int s3c2410_dma_request(enum dma_ch channel,
 			       struct s3c2410_dma_client *, void *dev);
 
 
@@ -71,14 +72,14 @@ extern int s3c2410_dma_request(unsigned int channel,
  * change the state of the dma channel
 */
 
-extern int s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op);
+extern int s3c2410_dma_ctrl(enum dma_ch channel, enum s3c2410_chan_op op);
 
 /* s3c2410_dma_setflags
  *
  * set the channel's flags to a given state
 */
 
-extern int s3c2410_dma_setflags(unsigned int channel,
+extern int s3c2410_dma_setflags(enum dma_ch channel,
 				unsigned int flags);
 
 /* s3c2410_dma_free
@@ -86,7 +87,7 @@ extern int s3c2410_dma_setflags(unsigned int channel,
  * free the dma channel (will also abort any outstanding operations)
 */
 
-extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
+extern int s3c2410_dma_free(enum dma_ch channel, struct s3c2410_dma_client *);
 
 /* s3c2410_dma_enqueue
  *
@@ -95,7 +96,7 @@ extern int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *);
 * drained before the buffer is given to the DMA system.
 */
 
-extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
+extern int s3c2410_dma_enqueue(enum dma_ch channel, void *id,
 			       dma_addr_t data, int size);
 
 /* s3c2410_dma_config
@@ -103,14 +104,14 @@ extern int s3c2410_dma_enqueue(unsigned int channel, void *id,
  * configure the dma channel
 */
 
-extern int s3c2410_dma_config(unsigned int channel, int xferunit);
+extern int s3c2410_dma_config(enum dma_ch channel, int xferunit);
 
 /* s3c2410_dma_devconfig
  *
 * configure the device we're talking to
 */
 
-extern int s3c2410_dma_devconfig(unsigned int channel,
+extern int s3c2410_dma_devconfig(enum dma_ch channel,
 				 enum s3c2410_dmasrc source, unsigned long devaddr);
 
 /* s3c2410_dma_getposition
@@ -118,10 +119,10 @@ extern int s3c2410_dma_devconfig(unsigned int channel,
  * get the position that the dma transfer is currently at
 */
 
-extern int s3c2410_dma_getposition(unsigned int channel,
+extern int s3c2410_dma_getposition(enum dma_ch channel,
 				   dma_addr_t *src, dma_addr_t *dest);
 
-extern int s3c2410_dma_set_opfn(unsigned int, s3c2410_dma_opfn_t rtn);
-extern int s3c2410_dma_set_buffdone_fn(unsigned int, s3c2410_dma_cbfn_t rtn);
+extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
+extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
 
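Switching every channel parameter from unsigned int to the forward-declared enum dma_ch keeps these prototypes in sync with the machine-level definitions that already use the enum, so mismatched declarations no longer slip through silently. In plain C an enum parameter still accepts any integer, so the gain is mainly prototype consistency plus self-documenting call sites; a hedged sketch with purely illustrative enumerators:

	enum dma_ch { DMACH_UART0, DMACH_UART1, DMACH_SDI };	/* illustrative only */

	extern int s3c2410_dma_request(enum dma_ch channel,
				       struct s3c2410_dma_client *, void *dev);

	/* callers now name the channel instead of passing a bare number */
	s3c2410_dma_request(DMACH_SDI, &client, dev);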
diff --git a/arch/arm/plat-samsung/include/plat/pm.h b/arch/arm/plat-samsung/include/plat/pm.h
index 7fb6f6be8c81..f6749916d194 100644
--- a/arch/arm/plat-samsung/include/plat/pm.h
+++ b/arch/arm/plat-samsung/include/plat/pm.h
@@ -42,7 +42,7 @@ extern unsigned long s3c_irqwake_eintallow;
 /* per-cpu sleep functions */
 
 extern void (*pm_cpu_prep)(void);
-extern void (*pm_cpu_sleep)(void);
+extern int (*pm_cpu_sleep)(unsigned long);
 
 /* Flags for PM Control */
 
@@ -52,10 +52,9 @@ extern unsigned char pm_uart_udivslot;	/* true to save UART UDIVSLOT */
 
 /* from sleep.S */
 
-extern int s3c_cpu_save(unsigned long *saveblk, long);
 extern void s3c_cpu_resume(void);
 
-extern void s3c2410_cpu_suspend(void);
+extern int s3c2410_cpu_suspend(unsigned long);
 
 /* sleep save info */
 
diff --git a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
index 0ffe34a21554..4c16fa3621bb 100644
--- a/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
+++ b/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
@@ -39,6 +39,7 @@ struct s3c64xx_spi_csinfo {
  * @fifo_lvl_mask: All tx fifo_lvl fields start at offset-6
  * @rx_lvl_offset: Depends on tx fifo_lvl field and bus number
  * @high_speed: If the controller supports HIGH_SPEED_EN bit
+ * @tx_st_done: Depends on tx fifo_lvl field
  */
 struct s3c64xx_spi_info {
 	int src_clk_nr;
@@ -53,6 +54,7 @@ struct s3c64xx_spi_info {
 	int fifo_lvl_mask;
 	int rx_lvl_offset;
 	int high_speed;
+	int tx_st_done;
 };
 
 /**
diff --git a/arch/arm/plat-samsung/irq-uart.c b/arch/arm/plat-samsung/irq-uart.c
index 32582c0958e3..657405c481d0 100644
--- a/arch/arm/plat-samsung/irq-uart.c
+++ b/arch/arm/plat-samsung/irq-uart.c
@@ -54,8 +54,15 @@ static void __init s3c_init_uart_irq(struct s3c_uart_irq *uirq)
 
 	gc = irq_alloc_generic_chip("s3c-uart", 1, uirq->base_irq, reg_base,
 				    handle_level_irq);
+
+	if (!gc) {
+		pr_err("%s: irq_alloc_generic_chip for IRQ %u failed\n",
+			__func__, uirq->base_irq);
+		return;
+	}
+
 	ct = gc->chip_types;
-	ct->chip.irq_ack = irq_gc_ack;
+	ct->chip.irq_ack = irq_gc_ack_set_bit;
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->regs.ack = S3C64XX_UINTP;
diff --git a/arch/arm/plat-samsung/irq-vic-timer.c b/arch/arm/plat-samsung/irq-vic-timer.c
index a607546ddbd0..f714d060370d 100644
--- a/arch/arm/plat-samsung/irq-vic-timer.c
+++ b/arch/arm/plat-samsung/irq-vic-timer.c
@@ -54,6 +54,13 @@ void __init s3c_init_vic_timer_irq(unsigned int num, unsigned int timer_irq)
 
 	s3c_tgc = irq_alloc_generic_chip("s3c-timer", 1, timer_irq,
 					 S3C64XX_TINT_CSTAT, handle_level_irq);
+
+	if (!s3c_tgc) {
+		pr_err("%s: irq_alloc_generic_chip for IRQ %d failed\n",
+			__func__, timer_irq);
+		return;
+	}
+
 	ct = s3c_tgc->chip_types;
 	ct->chip.irq_mask = irq_gc_mask_clr_bit;
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
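Both helpers gain the same defensive pattern: irq_alloc_generic_chip() returns NULL when its allocation fails, and the old code would have dereferenced gc->chip_types regardless. The irq_gc_ack change in the UART chip reflects the split of the old generic ack helper into irq_gc_ack_set_bit()/irq_gc_ack_clr_bit(); these registers ack by writing a 1. The shape of the check, in isolation:

	struct irq_chip_generic *gc;

	gc = irq_alloc_generic_chip("demo", 1, irq_base, reg_base,
				    handle_level_irq);
	if (!gc) {
		pr_err("demo: could not allocate generic chip for IRQ %u\n",
		       irq_base);
		return;
	}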
diff --git a/arch/arm/plat-samsung/pm.c b/arch/arm/plat-samsung/pm.c
index 5c0a440d6e16..5fa1742d019b 100644
--- a/arch/arm/plat-samsung/pm.c
+++ b/arch/arm/plat-samsung/pm.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
+#include <asm/suspend.h>
 #include <mach/hardware.h>
 #include <mach/map.h>
 
@@ -231,7 +232,7 @@ static void __maybe_unused s3c_pm_show_resume_irqs(int start,
 
 
 void (*pm_cpu_prep)(void);
-void (*pm_cpu_sleep)(void);
+int (*pm_cpu_sleep)(unsigned long);
 
 #define any_allowed(mask, allow) (((mask) & (allow)) != (allow))
 
@@ -294,15 +295,11 @@ static int s3c_pm_enter(suspend_state_t state)
 
 	s3c_pm_arch_stop_clocks();
 
-	/* s3c_cpu_save will also act as our return point from when
+	/* this will also act as our return point from when
 	 * we resume as it saves its own register state and restores it
 	 * during the resume. */
 
-	s3c_cpu_save(0, PLAT_PHYS_OFFSET - PAGE_OFFSET);
-
-	/* restore the cpu state using the kernel's cpu init code. */
-
-	cpu_init();
+	cpu_suspend(0, pm_cpu_sleep);
 
 	/* restore the system state */
 
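This converts the Samsung PM core to the generic ARM cpu_suspend() flow: the caller passes an argument and a "finisher" function, and cpu_suspend() saves the CPU context itself and restores it on the resume path before returning, which is why both s3c_cpu_save() and the explicit cpu_init() disappear. The finisher signature matches the new pm_cpu_sleep pointer; a hedged sketch (the machine hook is hypothetical):

	static int mach_pm_sleep(unsigned long arg)
	{
		/* put the SoC into its low-power state; if this function
		 * returns, the suspend attempt is treated as aborted */
		machine_enter_sleep();	/* hypothetical hardware hook */
		return 1;		/* only reached if sleep failed */
	}

	/* execution resumes here after wakeup, context already restored */
	cpu_suspend(0, mach_pm_sleep);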
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c018696765d4..5c74eb797f08 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -14,7 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/irq.h>
 
 #include <asm/i8259.h>
@@ -215,14 +215,13 @@ spurious_8259A_irq:
 	}
 }
 
-static int i8259A_resume(struct sys_device *dev)
+static void i8259A_resume(void)
 {
 	if (i8259A_auto_eoi >= 0)
 		init_8259A(i8259A_auto_eoi);
-	return 0;
 }
 
-static int i8259A_shutdown(struct sys_device *dev)
+static void i8259A_shutdown(void)
 {
 	/* Put the i8259A into a quiescent state that
 	 * the kernel initialization code can get it
@@ -232,26 +231,17 @@ static int i8259A_shutdown(struct sys_device *dev)
 		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
 		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
 	}
-	return 0;
 }
 
-static struct sysdev_class i8259_sysdev_class = {
-	.name = "i8259",
+static struct syscore_ops i8259_syscore_ops = {
 	.resume = i8259A_resume,
 	.shutdown = i8259A_shutdown,
 };
 
-static struct sys_device device_i8259A = {
-	.id	= 0,
-	.cls	= &i8259_sysdev_class,
-};
-
 static int __init i8259A_init_sysfs(void)
 {
-	int error = sysdev_class_register(&i8259_sysdev_class);
-	if (!error)
-		error = sysdev_register(&device_i8259A);
-	return error;
+	register_syscore_ops(&i8259_syscore_ops);
+	return 0;
 }
 
 device_initcall(i8259A_init_sysfs);
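The conversion follows the tree-wide sysdev-to-syscore migration: syscore callbacks run once, late in suspend/resume/shutdown with interrupts disabled, so they take no device argument and return void. The full replacement pattern, reduced to its skeleton:

	#include <linux/syscore_ops.h>

	static void mydev_resume(void)   { /* reprogram the hardware */ }
	static void mydev_shutdown(void) { /* quiesce the hardware */ }

	static struct syscore_ops mydev_syscore_ops = {
		.resume   = mydev_resume,
		.shutdown = mydev_shutdown,
	};

	static int __init mydev_syscore_init(void)
	{
		register_syscore_ops(&mydev_syscore_ops);	/* cannot fail */
		return 0;
	}
	device_initcall(mydev_syscore_init);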
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 33867ec4a234..9d6a8effeda2 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,6 +12,8 @@
 #include <linux/of.h>
 #include <linux/memblock.h>
 #include <linux/vmalloc.h>
+#include <linux/memory.h>
+
 #include <asm/firmware.h>
 #include <asm/machdep.h>
 #include <asm/pSeries_reconfig.h>
@@ -20,24 +22,25 @@
 static unsigned long get_memblock_size(void)
 {
 	struct device_node *np;
-	unsigned int memblock_size = 0;
+	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+	struct resource r;
 
 	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
 	if (np) {
-		const unsigned long *size;
+		const __be64 *size;
 
 		size = of_get_property(np, "ibm,lmb-size", NULL);
-		memblock_size = size ? *size : 0;
-
+		if (size)
+			memblock_size = be64_to_cpup(size);
 		of_node_put(np);
-	} else {
+	} else if (machine_is(pseries)) {
+		/* This fallback really only applies to pseries */
 		unsigned int memzero_size = 0;
-		const unsigned int *regs;
 
 		np = of_find_node_by_path("/memory@0");
 		if (np) {
-			regs = of_get_property(np, "reg", NULL);
-			memzero_size = regs ? regs[3] : 0;
+			if (!of_address_to_resource(np, 0, &r))
+				memzero_size = resource_size(&r);
 			of_node_put(np);
 		}
 
@@ -50,16 +53,21 @@ static unsigned long get_memblock_size(void)
 			sprintf(buf, "/memory@%x", memzero_size);
 			np = of_find_node_by_path(buf);
 			if (np) {
-				regs = of_get_property(np, "reg", NULL);
-				memblock_size = regs ? regs[3] : 0;
+				if (!of_address_to_resource(np, 0, &r))
+					memblock_size = resource_size(&r);
 				of_node_put(np);
 			}
 		}
 	}
-
 	return memblock_size;
 }
 
+/* WARNING: This is going to override the generic definition whenever
+ * pseries is built-in regardless of what platform is active at boot
+ * time. This is fine for now as this is the only "option" and it
+ * should work everywhere. If not, we'll have to turn this into a
+ * ppc_md. callback
+ */
 unsigned long memory_block_size_bytes(void)
 {
 	return get_memblock_size();
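Two fixes are folded together here: "ibm,lmb-size" is a 64-bit big-endian property, so it is now read through be64_to_cpup() instead of being punned to unsigned long, and the raw regs[3] indexing of "reg" (which assumed a fixed cell layout) gives way to of_address_to_resource(), which honours #address-cells/#size-cells. A condensed sketch of the two idioms (not the exact control flow above):

	const __be64 *prop;
	u64 block_size = MIN_MEMORY_BLOCK_SIZE;	/* sane default */
	struct resource res;

	prop = of_get_property(np, "ibm,lmb-size", NULL);
	if (prop)
		block_size = be64_to_cpup(prop);	/* explicit endian conversion */
	else if (!of_address_to_resource(np, 0, &res))
		block_size = resource_size(&res);	/* size from the parsed reg */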
diff --git a/arch/sparc/include/asm/irqflags_32.h b/arch/sparc/include/asm/irqflags_32.h
index d4d0711de0f9..14848909e0de 100644
--- a/arch/sparc/include/asm/irqflags_32.h
+++ b/arch/sparc/include/asm/irqflags_32.h
@@ -18,7 +18,7 @@ extern void arch_local_irq_restore(unsigned long);
 extern unsigned long arch_local_irq_save(void);
 extern void arch_local_irq_enable(void);
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 
@@ -26,17 +26,17 @@ static inline unsigned long arch_local_save_flags(void)
 	return flags;
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	arch_local_irq_save();
 }
 
-static inline bool arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace bool arch_irqs_disabled_flags(unsigned long flags)
 {
 	return (flags & PSR_PIL) != 0;
 }
 
-static inline bool arch_irqs_disabled(void)
+static inline notrace bool arch_irqs_disabled(void)
 {
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
diff --git a/arch/sparc/include/asm/irqflags_64.h b/arch/sparc/include/asm/irqflags_64.h
index aab969c82c2b..23cd27f6beb4 100644
--- a/arch/sparc/include/asm/irqflags_64.h
+++ b/arch/sparc/include/asm/irqflags_64.h
@@ -14,7 +14,7 @@
 
 #ifndef __ASSEMBLY__
 
-static inline unsigned long arch_local_save_flags(void)
+static inline notrace unsigned long arch_local_save_flags(void)
 {
 	unsigned long flags;
 
@@ -26,7 +26,7 @@ static inline unsigned long arch_local_save_flags(void)
 	return flags;
 }
 
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline notrace void arch_local_irq_restore(unsigned long flags)
 {
 	__asm__ __volatile__(
 		"wrpr	%0, %%pil"
@@ -36,7 +36,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 	);
 }
 
-static inline void arch_local_irq_disable(void)
+static inline notrace void arch_local_irq_disable(void)
 {
 	__asm__ __volatile__(
 		"wrpr	%0, %%pil"
@@ -46,7 +46,7 @@ static inline void arch_local_irq_disable(void)
 	);
 }
 
-static inline void arch_local_irq_enable(void)
+static inline notrace void arch_local_irq_enable(void)
 {
 	__asm__ __volatile__(
 		"wrpr	0, %%pil"
@@ -56,17 +56,17 @@ static inline void arch_local_irq_enable(void)
 	);
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline notrace int arch_irqs_disabled_flags(unsigned long flags)
 {
 	return (flags > 0);
 }
 
-static inline int arch_irqs_disabled(void)
+static inline notrace int arch_irqs_disabled(void)
 {
 	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static inline notrace unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags, tmp;
 
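All of these annotations share one motivation: the irqflags helpers are called from inside the function tracer's own irq-disable paths, and letting mcount instrumentation into them recurses. notrace is defined in include/linux/compiler.h roughly as:

	#define notrace __attribute__((no_instrument_function))

which keeps the compiler from emitting profiling calls into the marked function, so a minimal tracer-safe helper looks like:

	static inline notrace int demo_irqs_disabled(unsigned long flags)
	{
		return flags != 0;	/* safe to call from the tracer itself */
	}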
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 9fe08a1ea6c6..f445e98463e6 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -293,7 +293,7 @@ maybe_smp4m_msg:
 	WRITE_PAUSE
 	wr	%l4, PSR_ET, %psr
 	WRITE_PAUSE
-	sll	%o3, 28, %o2	! shift for simpler checks below
+	srl	%o3, 28, %o2	! shift for simpler checks below
 maybe_smp4m_msg_check_single:
 	andcc	%o2, 0x1, %g0
 	beq,a	maybe_smp4m_msg_check_mask
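The one-character fix matters: the cross-call message bits live in the top nibble of %o3, and sll shifted them further out of reach, so the andcc tests below always saw zero. srl brings the nibble down to bits 3:0 where the 0x1-style masks apply. The C equivalent of the fixed sequence (handler name hypothetical):

	/* message bits are in bits 31..28 of the status word */
	unsigned int o2 = o3 >> 28;	/* srl %o3, 28, %o2 */
	if (o2 & 0x1)			/* andcc %o2, 0x1, %g0 */
		handle_single_msg();	/* hypothetical handler */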
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index c0e01297e64e..e485a6804998 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -226,7 +226,7 @@ void leon3_getCacheRegs(struct leon3_cacheregs *regs)
  * Leon2 and Leon3 differ in their way of telling cache information
  *
  */
-int leon_flush_needed(void)
+int __init leon_flush_needed(void)
 {
 	int flush_needed = -1;
 	unsigned int ssize, sets;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index da349723d411..37357a599dca 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1170,7 +1170,7 @@ comment "NUMA (Summit) requires SMP, 64GB highmem support, ACPI"
 config AMD_NUMA
 	def_bool y
 	prompt "Old style AMD Opteron NUMA detection"
-	depends on NUMA && PCI
+	depends on X86_64 && NUMA && PCI
 	---help---
 	  Enable AMD NUMA node topology detection. You should say Y here if
 	  you have a multi processor AMD system. This uses an old method to
diff --git a/arch/x86/include/asm/mmzone_32.h b/arch/x86/include/asm/mmzone_32.h
index 224e8c5eb307..ffa037f28d39 100644
--- a/arch/x86/include/asm/mmzone_32.h
+++ b/arch/x86/include/asm/mmzone_32.h
@@ -57,6 +57,8 @@ static inline int pfn_valid(int pfn)
 	return 0;
 }
 
+#define early_pfn_valid(pfn)	pfn_valid((pfn))
+
 #endif /* CONFIG_DISCONTIGMEM */
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
index ead21b663117..b4fd836e4053 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
@@ -28,6 +28,8 @@ pmode_cr3:	.long	0	/* Saved %cr3 */
 pmode_cr4:	.long	0	/* Saved %cr4 */
 pmode_efer:	.quad	0	/* Saved EFER */
 pmode_gdt:	.quad	0
+pmode_misc_en:	.quad	0	/* Saved MISC_ENABLE MSR */
+pmode_behavior:	.long	0	/* Wakeup behavior flags */
 realmode_flags:	.long	0
 real_magic:	.long	0
 trampoline_segment:	.word 0
@@ -91,6 +93,18 @@ wakeup_code:
 	/* Call the C code */
 	calll	main
 
+	/* Restore MISC_ENABLE before entering protected mode, in case
+	   BIOS decided to clear XD_DISABLE during S3. */
+	movl	pmode_behavior, %eax
+	btl	$WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %eax
+	jnc	1f
+
+	movl	pmode_misc_en, %eax
+	movl	pmode_misc_en + 4, %edx
+	movl	$MSR_IA32_MISC_ENABLE, %ecx
+	wrmsr
+1:
+
 	/* Do any other stuff... */
 
 #ifndef CONFIG_64BIT
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/kernel/acpi/realmode/wakeup.h
index e1828c07e79c..97a29e1430e3 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/kernel/acpi/realmode/wakeup.h
@@ -21,6 +21,9 @@ struct wakeup_header {
 	u32 pmode_efer_low;	/* Protected mode EFER */
 	u32 pmode_efer_high;
 	u64 pmode_gdt;
+	u32 pmode_misc_en_low;	/* Protected mode MISC_ENABLE */
+	u32 pmode_misc_en_high;
+	u32 pmode_behavior;	/* Wakeup routine behavior flags */
 	u32 realmode_flags;
 	u32 real_magic;
 	u16 trampoline_segment;	/* segment with trampoline code, 64-bit only */
@@ -39,4 +42,7 @@ extern struct wakeup_header wakeup_header;
 #define WAKEUP_HEADER_SIGNATURE 0x51ee1111
 #define WAKEUP_END_SIGNATURE	0x65a22c82
 
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE	0
+
 #endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 18a857ba7a25..103b6ab368d3 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -77,6 +77,12 @@ int acpi_suspend_lowlevel(void)
 
 	header->pmode_cr0 = read_cr0();
 	header->pmode_cr4 = read_cr4_safe();
+	header->pmode_behavior = 0;
+	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
+			&header->pmode_misc_en_low,
+			&header->pmode_misc_en_high))
+		header->pmode_behavior |=
+			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
 	header->realmode_flags = acpi_realmode_flags;
 	header->real_magic = 0x12345678;
 
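The three hunks above cooperate: sleep.c snapshots MSR_IA32_MISC_ENABLE with rdmsr_safe() (which returns 0 on success, so the restore flag is only set when the read actually worked), wakeup.h reserves bit 0 of pmode_behavior for it, and the realmode stub rewrites the MSR before entering protected mode, so a BIOS that cleared XD_DISABLE over S3 cannot leave NX silently disabled. A C rendering of the stub's bit test (btl copies the tested bit into CF; jnc skips the restore when it is clear):

	if (pmode_behavior & (1u << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE))
		wrmsr(MSR_IA32_MISC_ENABLE,
		      pmode_misc_en_low, pmode_misc_en_high);	/* edx:eax pair */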
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 0c016f727695..14eed214b584 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -294,6 +294,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 			DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
 		},
 	},
+	{	/* Handle reboot issue on Acer Aspire one */
+		.callback = set_bios_reboot,
+		.ident = "Acer Aspire One A110",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"),
+		},
+	},
 	{ }
 };
 
@@ -411,6 +419,14 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 			DMI_MATCH(DMI_PRODUCT_NAME, "iMac9,1"),
 		},
 	},
+	{	/* Handle problems with rebooting on the Latitude E6320. */
+		.callback = set_pci_reboot,
+		.ident = "Dell Latitude E6320",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
+		},
+	},
 	{ }
 };
 
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index d865c4aeec55..bbaaa005bf0e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -28,6 +28,7 @@
 #include <linux/poison.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
 #include <linux/gfp.h>
@@ -895,8 +896,6 @@ const char *arch_vma_name(struct vm_area_struct *vma)
 }
 
 #ifdef CONFIG_X86_UV
-#define MIN_MEMORY_BLOCK_SIZE	(1 << SECTION_SIZE_BITS)
-
 unsigned long memory_block_size_bytes(void)
 {
 	if (is_uv_system()) {
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
index cf9750004a08..68894fdc034b 100644
--- a/arch/x86/oprofile/nmi_int.c
+++ b/arch/x86/oprofile/nmi_int.c
@@ -112,8 +112,10 @@ static void nmi_cpu_start(void *dummy)
 static int nmi_start(void)
 {
 	get_online_cpus();
-	on_each_cpu(nmi_cpu_start, NULL, 1);
 	ctr_running = 1;
+	/* make ctr_running visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_start, NULL, 1);
 	put_online_cpus();
 	return 0;
 }
@@ -504,15 +506,18 @@ static int nmi_setup(void)
 
 	nmi_enabled = 0;
 	ctr_running = 0;
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 	err = register_die_notifier(&profile_exceptions_nb);
 	if (err)
 		goto fail;
 
 	get_online_cpus();
 	register_cpu_notifier(&oprofile_cpu_nb);
-	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	nmi_enabled = 1;
+	/* make nmi_enabled visible to the nmi handler: */
+	smp_mb();
+	on_each_cpu(nmi_cpu_setup, NULL, 1);
 	put_online_cpus();
 
 	return 0;
@@ -531,7 +536,8 @@ static void nmi_shutdown(void)
 	nmi_enabled = 0;
 	ctr_running = 0;
 	put_online_cpus();
-	barrier();
+	/* make variables visible to the nmi handler: */
+	smp_mb();
 	unregister_die_notifier(&profile_exceptions_nb);
 	msrs = &get_cpu_var(cpu_msrs);
 	model->shutdown(msrs);
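barrier() is only a compiler barrier; it orders nothing against an NMI handler running on another CPU. Replacing it with smp_mb() makes the flag store globally visible before the cross-CPU work starts (or before the notifier is unregistered), the classic publish/observe pairing, sketched here:

	/* publisher (nmi_start, simplified) */
	ctr_running = 1;
	smp_mb();			/* store visible before the IPIs */
	on_each_cpu(nmi_cpu_start, NULL, 1);

	/* observer (NMI handler side, sketched) */
	if (!ctr_running)
		return 0;		/* not ours; the flag is reliably visible */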
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index fe008309ffec..f567965c0620 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -327,13 +327,12 @@ int __init pci_xen_hvm_init(void)
 }
 
 #ifdef CONFIG_XEN_DOM0
-static int xen_register_pirq(u32 gsi, int triggering)
+static int xen_register_pirq(u32 gsi, int gsi_override, int triggering)
 {
 	int rc, pirq, irq = -1;
 	struct physdev_map_pirq map_irq;
 	int shareable = 0;
 	char *name;
-	bool gsi_override = false;
 
 	if (!xen_pv_domain())
 		return -1;
@@ -345,31 +344,12 @@ static int xen_register_pirq(u32 gsi, int triggering)
 		shareable = 1;
 		name = "ioapic-level";
 	}
-
 	pirq = xen_allocate_pirq_gsi(gsi);
 	if (pirq < 0)
 		goto out;
 
-	/* Before we bind the GSI to a Linux IRQ, check whether
-	 * we need to override it with bus_irq (IRQ) value. Usually for
-	 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
-	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
-	 * but there are oddballs where the IRQ != GSI:
-	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
-	 * which ends up being: gsi_to_irq[9] == 20
-	 * (which is what acpi_gsi_to_irq ends up calling when starting the
-	 * the ACPI interpreter and keels over since IRQ 9 has not been
-	 * setup as we had setup IRQ 20 for it).
-	 */
-	if (gsi == acpi_sci_override_gsi) {
-		/* Check whether the GSI != IRQ */
-		acpi_gsi_to_irq(gsi, &irq);
-		if (irq != gsi)
-			/* Bugger, we MUST have that IRQ. */
-			gsi_override = true;
-	}
-	if (gsi_override)
-		irq = xen_bind_pirq_gsi_to_irq(irq, pirq, shareable, name);
+	if (gsi_override >= 0)
+		irq = xen_bind_pirq_gsi_to_irq(gsi_override, pirq, shareable, name);
 	else
 		irq = xen_bind_pirq_gsi_to_irq(gsi, pirq, shareable, name);
 	if (irq < 0)
@@ -392,7 +372,7 @@ out:
 	return irq;
 }
 
-static int xen_register_gsi(u32 gsi, int triggering, int polarity)
+static int xen_register_gsi(u32 gsi, int gsi_override, int triggering, int polarity)
 {
 	int rc, irq;
 	struct physdev_setup_gsi setup_gsi;
@@ -403,7 +383,7 @@ static int xen_register_gsi(u32 gsi, int triggering, int polarity)
 	printk(KERN_DEBUG "xen: registering gsi %u triggering %d polarity %d\n",
 			gsi, triggering, polarity);
 
-	irq = xen_register_pirq(gsi, triggering);
+	irq = xen_register_pirq(gsi, gsi_override, triggering);
 
 	setup_gsi.gsi = gsi;
 	setup_gsi.triggering = (triggering == ACPI_EDGE_SENSITIVE ? 0 : 1);
@@ -425,6 +405,8 @@ static __init void xen_setup_acpi_sci(void)
 	int rc;
 	int trigger, polarity;
 	int gsi = acpi_sci_override_gsi;
+	int irq = -1;
+	int gsi_override = -1;
 
 	if (!gsi)
 		return;
@@ -441,7 +423,25 @@ static __init void xen_setup_acpi_sci(void)
 	printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
 			"polarity=%d\n", gsi, trigger, polarity);
 
-	gsi = xen_register_gsi(gsi, trigger, polarity);
+	/* Before we bind the GSI to a Linux IRQ, check whether
+	 * we need to override it with bus_irq (IRQ) value. Usually for
+	 * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
+	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
+	 * but there are oddballs where the IRQ != GSI:
+	 *  ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
+	 * which ends up being: gsi_to_irq[9] == 20
+	 * (which is what acpi_gsi_to_irq ends up calling when starting the
+	 * the ACPI interpreter and keels over since IRQ 9 has not been
+	 * setup as we had setup IRQ 20 for it).
+	 */
+	/* Check whether the GSI != IRQ */
+	if (acpi_gsi_to_irq(gsi, &irq) == 0) {
+		if (irq >= 0 && irq != gsi)
+			/* Bugger, we MUST have that IRQ. */
+			gsi_override = irq;
+	}
+
+	gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
 	printk(KERN_INFO "xen: acpi sci %d\n", gsi);
 
 	return;
@@ -450,7 +450,7 @@ static __init void xen_setup_acpi_sci(void)
 static int acpi_register_gsi_xen(struct device *dev, u32 gsi,
 				 int trigger, int polarity)
 {
-	return xen_register_gsi(gsi, trigger, polarity);
+	return xen_register_gsi(gsi, -1 /* no GSI override */, trigger, polarity);
 }
 
 static int __init pci_xen_initial_domain(void)
@@ -489,7 +489,7 @@ void __init xen_setup_pirqs(void)
 		if (acpi_get_override_irq(irq, &trigger, &polarity) == -1)
 			continue;
 
-		xen_register_pirq(irq, -1 /* no GSI override */,
+		xen_register_pirq(irq, -1 /* no GSI override */,
 			trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE);
 	}
 }
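The refactor hoists the ACPI SCI special case out of xen_register_pirq(): the override lookup now happens once in xen_setup_acpi_sci(), and every caller passes an explicit gsi_override (-1 meaning none), so the helper no longer reaches into acpi_sci_override_gsi behind the caller's back. The caller-side shape:

	int irq = -1, gsi_override = -1;

	/* acpi_gsi_to_irq() returns 0 on success */
	if (acpi_gsi_to_irq(gsi, &irq) == 0 && irq >= 0 && irq != gsi)
		gsi_override = irq;	/* legacy IRQ differs from the GSI */

	gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);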
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 474356b98ede..899e393d8e73 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -504,9 +504,6 @@ void __init efi_init(void)
 	x86_platform.set_wallclock = efi_set_rtc_mmss;
 #endif
 
-	/* Setup for EFI runtime service */
-	reboot_type = BOOT_EFI;
-
 #if EFI_DEBUG
 	print_efi_memmap();
 #endif