Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig                        |    3
-rw-r--r--	arch/arm/Makefile                       |   59
-rw-r--r--	arch/arm/boot/compressed/atags_to_fdt.c |   44
-rw-r--r--	arch/arm/boot/compressed/head.S         |   40
-rw-r--r--	arch/arm/boot/dts/bcm11351.dtsi         |    8
-rw-r--r--	arch/arm/common/mcpm_head.S             |    4
-rw-r--r--	arch/arm/common/mcpm_platsmp.c          |    5
-rw-r--r--	arch/arm/include/asm/div64.h            |    2
-rw-r--r--	arch/arm/include/asm/io.h               |    8
-rw-r--r--	arch/arm/include/asm/mmu_context.h      |   23
-rw-r--r--	arch/arm/include/asm/spinlock.h         |   25
-rw-r--r--	arch/arm/include/asm/thread_info.h      |    2
-rw-r--r--	arch/arm/include/asm/tls.h              |   40
-rw-r--r--	arch/arm/kernel/entry-armv.S            |    5
-rw-r--r--	arch/arm/kernel/entry-common.S          |   42
-rw-r--r--	arch/arm/kernel/perf_event.c            |    1
-rw-r--r--	arch/arm/kernel/process.c               |    4
-rw-r--r--	arch/arm/kernel/ptrace.c                |    2
-rw-r--r--	arch/arm/kernel/setup.c                 |    7
-rw-r--r--	arch/arm/kernel/smp_tlb.c               |   18
-rw-r--r--	arch/arm/kernel/traps.c                 |    4
-rw-r--r--	arch/arm/mach-ebsa110/core.c            |    2
-rw-r--r--	arch/arm/mach-imx/mm-imx3.c             |    2
-rw-r--r--	arch/arm/mach-iop13xx/io.c              |    2
-rw-r--r--	arch/arm/mach-ixp4xx/common.c           |    2
-rw-r--r--	arch/arm/mach-msm/common.h              |    2
-rw-r--r--	arch/arm/mach-msm/io.c                  |    2
-rw-r--r--	arch/arm/mach-omap2/Kconfig             |    1
-rw-r--r--	arch/arm/mm/cache-l2x0.c                |  158
-rw-r--r--	arch/arm/mm/context.c                   |   55
-rw-r--r--	arch/arm/mm/dma-mapping.c               |   20
-rw-r--r--	arch/arm/mm/flush.c                     |    2
-rw-r--r--	arch/arm/mm/ioremap.c                   |   10
-rw-r--r--	arch/arm/mm/nommu.c                     |    6
-rw-r--r--	arch/arm/plat-versatile/headsmp.S       |    2
35 files changed, 459 insertions(+), 153 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 136f263ed47b..5a326f935858 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -175,6 +175,9 @@ config ARCH_HAS_CPUFREQ
 	  and that the relevant menu configurations are displayed for
 	  it.
 
+config ARCH_HAS_BANDGAP
+	bool
+
 config GENERIC_HWEIGHT
 	bool
 	default y
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 1ba358ba16b8..de4e1cb2f14f 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -59,37 +59,43 @@ comma = ,
 # Note that GCC does not numerically define an architecture version
 # macro, but instead defines a whole series of macros which makes
 # testing for a specific architecture or later rather impossible.
-arch-$(CONFIG_CPU_32v7)		:=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
-arch-$(CONFIG_CPU_32v6)		:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
+arch-$(CONFIG_CPU_32v7)		=-D__LINUX_ARM_ARCH__=7 $(call cc-option,-march=armv7-a,-march=armv5t -Wa$(comma)-march=armv7-a)
+arch-$(CONFIG_CPU_32v6)		=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6,-march=armv5t -Wa$(comma)-march=armv6)
 # Only override the compiler option if ARMv6. The ARMv6K extensions are
 # always available in ARMv7
 ifeq ($(CONFIG_CPU_32v6),y)
-arch-$(CONFIG_CPU_32v6K)	:=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
+arch-$(CONFIG_CPU_32v6K)	=-D__LINUX_ARM_ARCH__=6 $(call cc-option,-march=armv6k,-march=armv5t -Wa$(comma)-march=armv6k)
 endif
-arch-$(CONFIG_CPU_32v5)		:=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
-arch-$(CONFIG_CPU_32v4T)	:=-D__LINUX_ARM_ARCH__=4 -march=armv4t
-arch-$(CONFIG_CPU_32v4)		:=-D__LINUX_ARM_ARCH__=4 -march=armv4
-arch-$(CONFIG_CPU_32v3)		:=-D__LINUX_ARM_ARCH__=3 -march=armv3
+arch-$(CONFIG_CPU_32v5)		=-D__LINUX_ARM_ARCH__=5 $(call cc-option,-march=armv5te,-march=armv4t)
+arch-$(CONFIG_CPU_32v4T)	=-D__LINUX_ARM_ARCH__=4 -march=armv4t
+arch-$(CONFIG_CPU_32v4)		=-D__LINUX_ARM_ARCH__=4 -march=armv4
+arch-$(CONFIG_CPU_32v3)		=-D__LINUX_ARM_ARCH__=3 -march=armv3
+
+# Evaluate arch cc-option calls now
+arch-y := $(arch-y)
 
 # This selects how we optimise for the processor.
-tune-$(CONFIG_CPU_ARM7TDMI)	:=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM720T)	:=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM740T)	:=-mtune=arm7tdmi
-tune-$(CONFIG_CPU_ARM9TDMI)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM940T)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM946E)	:=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
-tune-$(CONFIG_CPU_ARM920T)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM922T)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM925T)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_ARM926T)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_FA526)	:=-mtune=arm9tdmi
-tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
-tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
-tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
-tune-$(CONFIG_CPU_XSC3)		:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
-tune-$(CONFIG_CPU_FEROCEON)	:=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
-tune-$(CONFIG_CPU_V6)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
-tune-$(CONFIG_CPU_V6K)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_ARM7TDMI)	=-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM720T)	=-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM740T)	=-mtune=arm7tdmi
+tune-$(CONFIG_CPU_ARM9TDMI)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM940T)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM946E)	=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
+tune-$(CONFIG_CPU_ARM920T)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM922T)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM925T)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_ARM926T)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_FA526)	=-mtune=arm9tdmi
+tune-$(CONFIG_CPU_SA110)	=-mtune=strongarm110
+tune-$(CONFIG_CPU_SA1100)	=-mtune=strongarm1100
+tune-$(CONFIG_CPU_XSCALE)	=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_XSC3)		=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
+tune-$(CONFIG_CPU_FEROCEON)	=$(call cc-option,-mtune=marvell-f,-mtune=xscale)
+tune-$(CONFIG_CPU_V6)		=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+tune-$(CONFIG_CPU_V6K)		=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)
+
+# Evaluate tune cc-option calls now
+tune-y := $(tune-y)
 
 ifeq ($(CONFIG_AEABI),y)
 CFLAGS_ABI	:=-mabi=aapcs-linux -mno-thumb-interwork
@@ -289,9 +295,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb: scripts
+%.dtb: | scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@
 
+PHONY += dtbs
 dtbs: scripts
 	$(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) dtbs
 
diff --git a/arch/arm/boot/compressed/atags_to_fdt.c b/arch/arm/boot/compressed/atags_to_fdt.c
index aabc02a68482..d1153c8a765a 100644
--- a/arch/arm/boot/compressed/atags_to_fdt.c
+++ b/arch/arm/boot/compressed/atags_to_fdt.c
@@ -53,6 +53,17 @@ static const void *getprop(const void *fdt, const char *node_path,
 	return fdt_getprop(fdt, offset, property, len);
 }
 
+static uint32_t get_cell_size(const void *fdt)
+{
+	int len;
+	uint32_t cell_size = 1;
+	const uint32_t *size_len =  getprop(fdt, "/", "#size-cells", &len);
+
+	if (size_len)
+		cell_size = fdt32_to_cpu(*size_len);
+	return cell_size;
+}
+
 static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
 {
 	char cmdline[COMMAND_LINE_SIZE];
@@ -95,9 +106,11 @@ static void merge_fdt_bootargs(void *fdt, const char *fdt_cmdline)
 int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 {
 	struct tag *atag = atag_list;
-	uint32_t mem_reg_property[2 * NR_BANKS];
+	/* In the case of 64 bits memory size, need to reserve 2 cells for
+	 * address and size for each bank */
+	uint32_t mem_reg_property[2 * 2 * NR_BANKS];
 	int memcount = 0;
-	int ret;
+	int ret, memsize;
 
 	/* make sure we've got an aligned pointer */
 	if ((u32)atag_list & 0x3)
@@ -137,8 +150,25 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 				continue;
 			if (!atag->u.mem.size)
 				continue;
-			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.start);
-			mem_reg_property[memcount++] = cpu_to_fdt32(atag->u.mem.size);
+			memsize = get_cell_size(fdt);
+
+			if (memsize == 2) {
+				/* if memsize is 2, that means that
+				 * each data needs 2 cells of 32 bits,
+				 * so the data are 64 bits */
+				uint64_t *mem_reg_prop64 =
+					(uint64_t *)mem_reg_property;
+				mem_reg_prop64[memcount++] =
+					cpu_to_fdt64(atag->u.mem.start);
+				mem_reg_prop64[memcount++] =
+					cpu_to_fdt64(atag->u.mem.size);
+			} else {
+				mem_reg_property[memcount++] =
+					cpu_to_fdt32(atag->u.mem.start);
+				mem_reg_property[memcount++] =
+					cpu_to_fdt32(atag->u.mem.size);
+			}
+
 		} else if (atag->hdr.tag == ATAG_INITRD2) {
 			uint32_t initrd_start, initrd_size;
 			initrd_start = atag->u.initrd.start;
@@ -150,8 +180,10 @@ int atags_to_fdt(void *atag_list, void *fdt, int total_space)
 		}
 	}
 
-	if (memcount)
-		setprop(fdt, "/memory", "reg", mem_reg_property, 4*memcount);
+	if (memcount) {
+		setprop(fdt, "/memory", "reg", mem_reg_property,
+			4 * memcount * memsize);
+	}
 
 	return fdt_pack(fdt);
 }
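The atags_to_fdt changes above size the /memory "reg" property from the root node's #size-cells: two cells per value means 64-bit entries. A minimal standalone C sketch of the same packing decision (the bswap helpers stand in for libfdt's cpu_to_fdt32()/cpu_to_fdt64() on a little-endian host; the cell count and addresses are invented for illustration):

#include <stdint.h>
#include <stdio.h>

/* FDT property values are big-endian; these stand in for libfdt's
 * cpu_to_fdt32()/cpu_to_fdt64() on a little-endian host. */
static uint32_t to_fdt32(uint32_t x) { return __builtin_bswap32(x); }
static uint64_t to_fdt64(uint64_t x) { return __builtin_bswap64(x); }

int main(void)
{
	uint32_t mem_reg_property[4];	/* room for one bank either way */
	int memsize = 2;		/* pretend /#size-cells == 2 */
	uint64_t start = 0x80000000ULL, size = 0x40000000ULL;
	int memcount = 0;

	if (memsize == 2) {		/* 64-bit: two cells per value */
		uint64_t *prop64 = (uint64_t *)mem_reg_property;
		prop64[memcount++] = to_fdt64(start);
		prop64[memcount++] = to_fdt64(size);
	} else {			/* 32-bit: one cell per value */
		mem_reg_property[memcount++] = to_fdt32((uint32_t)start);
		mem_reg_property[memcount++] = to_fdt32((uint32_t)size);
	}
	/* property length in bytes, exactly as the patch computes it */
	printf("reg is %d bytes\n", 4 * memcount * memsize);
	return 0;
}

With memsize == 2 this prints 16 (two 8-byte values); with memsize == 1 it would print 8, which is why the setprop() call above multiplies by memsize.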
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 032a8d987148..75189f13cf54 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -142,7 +142,6 @@ start:
 		mov	r7, r1			@ save architecture ID
 		mov	r8, r2			@ save atags pointer
 
-#ifndef __ARM_ARCH_2__
 		/*
 		 * Booting from Angel - need to enter SVC mode and disable
 		 * FIQs/IRQs (numeric definitions from angel arm.h source).
@@ -158,10 +157,6 @@ not_angel:
 		safe_svcmode_maskall r0
 		msr	spsr_cxsf, r9		@ Save the CPU boot mode in
 						@ SPSR
-#else
-		teqp	pc, #0x0c000003		@ turn off interrupts
-#endif
-
 		/*
 		 * Note that some cache flushing and other stuff may
 		 * be needed here - is there an Angel SWI call for this?
@@ -183,7 +178,19 @@ not_angel:
 		ldr	r4, =zreladdr
 #endif
 
-		bl	cache_on
+		/*
+		 * Set up a page table only if it won't overwrite ourself.
+		 * That means r4 < pc && r4 - 16k page directory > &_end.
+		 * Given that r4 > &_end is most unfrequent, we add a
+		 * rough additional 1MB of room for a possible appended DTB.
+		 */
+		mov	r0, pc
+		cmp	r0, r4
+		ldrcc	r0, LC0+32
+		addcc	r0, r0, pc
+		cmpcc	r4, r0
+		orrcc	r4, r4, #1		@ remember we skipped cache_on
+		blcs	cache_on
 
 restart:	adr	r0, LC0
 		ldmia	r0, {r1, r2, r3, r6, r10, r11, r12}
@@ -229,7 +236,7 @@ restart: adr r0, LC0
  *   r0  = delta
  *   r2  = BSS start
  *   r3  = BSS end
- *   r4  = final kernel address
+ *   r4  = final kernel address (possibly with LSB set)
  *   r5  = appended dtb size (still unknown)
  *   r6  = _edata
  *   r7  = architecture ID
@@ -277,6 +284,7 @@ restart: adr r0, LC0
  */
 		cmp	r0, #1
 		sub	r0, r4, #TEXT_OFFSET
+		bic	r0, r0, #1
 		add	r0, r0, #0x100
 		mov	r1, r6
 		sub	r2, sp, r6
@@ -323,12 +331,13 @@ dtb_check_done:
 
 /*
  * Check to see if we will overwrite ourselves.
- *   r4  = final kernel address
+ *   r4  = final kernel address (possibly with LSB set)
  *   r9  = size of decompressed image
  *   r10 = end of this image, including bss/stack/malloc space if non XIP
  * We basically want:
  *   r4 - 16k page directory >= r10 -> OK
  *   r4 + image length <= address of wont_overwrite -> OK
+ * Note: the possible LSB in r4 is harmless here.
  */
 		add	r10, r10, #16384
 		cmp	r4, r10
@@ -390,7 +399,8 @@ dtb_check_done:
 		add	sp, sp, r6
 #endif
 
-		bl	cache_clean_flush
+		tst	r4, #1
+		bleq	cache_clean_flush
 
 		adr	r0, BSYM(restart)
 		add	r0, r0, r6
@@ -402,7 +412,7 @@ wont_overwrite:
  *   r0  = delta
  *   r2  = BSS start
  *   r3  = BSS end
- *   r4  = kernel execution address
+ *   r4  = kernel execution address (possibly with LSB set)
  *   r5  = appended dtb size (0 if not present)
  *   r7  = architecture ID
  *   r8  = atags pointer
@@ -465,6 +475,15 @@ not_relocated: mov r0, #0
 		cmp	r2, r3
 		blo	1b
 
+		/*
+		 * Did we skip the cache setup earlier?
+		 * That is indicated by the LSB in r4.
+		 * Do it now if so.
+		 */
+		tst	r4, #1
+		bic	r4, r4, #1
+		blne	cache_on
+
 /*
  * The C runtime environment should now be setup sufficiently.
  * Set up some pointers, and start decompressing.
@@ -513,6 +532,7 @@ LC0: .word LC0 @ r1
 		.word	_got_start		@ r11
 		.word	_got_end		@ ip
 		.word	.L_user_stack_end	@ sp
+		.word	_end - restart + 16384 + 1024*1024
 		.size	LC0, . - LC0
 
 #ifdef CONFIG_ARCH_RPC
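A recurring trick in the head.S changes above: the final kernel address in r4 is at least word-aligned, so its LSB is free to record "cache_on was skipped" and is stripped with bic before the address is used. A hedged C illustration of the pointer-tagging idiom (names invented; the decompressor itself is assembly):

#include <assert.h>
#include <stdint.h>

/* Bit 0 of an aligned address can carry a flag, as with r4 above. */
static uintptr_t tag_skipped(uintptr_t a) { return a | 1; }              /* orrcc r4, r4, #1 */
static int was_skipped(uintptr_t a)       { return a & 1; }              /* tst   r4, #1 */
static uintptr_t clear_tag(uintptr_t a)   { return a & ~(uintptr_t)1; }  /* bic   r4, r4, #1 */

int main(void)
{
	uintptr_t r4 = 0x00008000;	/* a typical zreladdr */

	r4 = tag_skipped(r4);
	assert(was_skipped(r4));
	assert(clear_tag(r4) == 0x00008000);
	return 0;
}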
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index 41b2c6c33f09..5e48c85abc2f 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -47,10 +47,10 @@
 	};
 
 	L2: l2-cache {
-		compatible = "arm,pl310-cache";
+		compatible = "bcm,bcm11351-a2-pl310-cache";
 		reg = <0x3ff20000 0x1000>;
 		cache-unified;
 		cache-level = <2>;
 	};
 
 	timer@35006000 {
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 8178705c4b24..80f033614a1f 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -32,11 +32,11 @@
 1901:	adr	r0, 1902b
 	bl	printascii
 	mov	r0, r9
-	bl	printhex8
+	bl	printhex2
 	adr	r0, 1903b
 	bl	printascii
 	mov	r0, r10
-	bl	printhex8
+	bl	printhex2
 	adr	r0, 1904b
 	bl	printascii
 #endif
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 3caed0db6986..510e5b13aa2e 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,10 +19,6 @@
 #include <asm/smp.h>
 #include <asm/smp_plat.h>
 
-static void __init simple_smp_init_cpus(void)
-{
-}
-
 static int __cpuinit mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	unsigned int mpidr, pcpu, pcluster, ret;
@@ -74,7 +70,6 @@ static void mcpm_cpu_die(unsigned int cpu)
 #endif
 
 static struct smp_operations __initdata mcpm_smp_ops = {
-	.smp_init_cpus		= simple_smp_init_cpus,
 	.smp_boot_secondary	= mcpm_boot_secondary,
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/arm/include/asm/div64.h b/arch/arm/include/asm/div64.h
index fe92ccf1d0b0..191ada6e4d2d 100644
--- a/arch/arm/include/asm/div64.h
+++ b/arch/arm/include/asm/div64.h
@@ -46,7 +46,7 @@
 	__rem;							\
 })
 
-#if __GNUC__ < 4
+#if __GNUC__ < 4 || !defined(CONFIG_AEABI)
 
 /*
  * gcc versions earlier than 4.0 are simply too problematic for the
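The #if change above sends pre-4.0 compilers and non-EABI (OABI) builds alike down the generic fallback; the hand-optimised path presumably encodes EABI assumptions about how 64-bit values are handled. For reference, a userspace stand-in showing the contract do_div() implements, namely divide the 64-bit value in place and return the 32-bit remainder:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's do_div(): n is updated to the
 * quotient and the remainder is returned. */
#define do_div(n, base) ({			\
	uint32_t __rem = (n) % (base);		\
	(n) /= (base);				\
	__rem;					\
})

int main(void)
{
	uint64_t ns = 1000000123ULL;
	uint32_t rem = do_div(ns, 1000000000U);	/* split into s + ns */

	printf("%llu s, %u ns\n", (unsigned long long)ns, rem);
	return 0;
}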
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 652b56086de7..d070741b2b37 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -130,16 +130,16 @@ static inline u32 __raw_readl(const volatile void __iomem *addr)
  */
 extern void __iomem *__arm_ioremap_pfn_caller(unsigned long, unsigned long,
 	size_t, unsigned int, void *);
-extern void __iomem *__arm_ioremap_caller(unsigned long, size_t, unsigned int,
+extern void __iomem *__arm_ioremap_caller(phys_addr_t, size_t, unsigned int,
 	void *);
 
 extern void __iomem *__arm_ioremap_pfn(unsigned long, unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap(unsigned long, size_t, unsigned int);
-extern void __iomem *__arm_ioremap_exec(unsigned long, size_t, bool cached);
+extern void __iomem *__arm_ioremap(phys_addr_t, size_t, unsigned int);
+extern void __iomem *__arm_ioremap_exec(phys_addr_t, size_t, bool cached);
 extern void __iounmap(volatile void __iomem *addr);
 extern void __arm_iounmap(volatile void __iomem *addr);
 
-extern void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+extern void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 	unsigned int, void *);
 extern void (*arch_iounmap)(volatile void __iomem *);
 
diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..b5792b7fd8d3 100644
--- a/arch/arm/include/asm/mmu_context.h
+++ b/arch/arm/include/asm/mmu_context.h
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
 #include <asm/proc-fns.h>
+#include <asm/smp_plat.h>
 #include <asm-generic/mm_hooks.h>
 
 void __check_vmalloc_seq(struct mm_struct *mm);
@@ -27,7 +28,15 @@ void __check_vmalloc_seq(struct mm_struct *mm);
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
 #define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
 
-DECLARE_PER_CPU(atomic64_t, active_asids);
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask);
+#else  /* !CONFIG_ARM_ERRATA_798181 */
+static inline void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+					   cpumask_t *mask)
+{
+}
+#endif /* CONFIG_ARM_ERRATA_798181 */
 
 #else	/* !CONFIG_CPU_HAS_ASID */
 
@@ -98,12 +107,16 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
 
-#ifdef CONFIG_SMP
-	/* check for possible thread migration */
-	if (!cpumask_empty(mm_cpumask(next)) &&
+	/*
+	 * __sync_icache_dcache doesn't broadcast the I-cache invalidation,
+	 * so check for possible thread migration and invalidate the I-cache
+	 * if we're new to this CPU.
+	 */
+	if (cache_ops_need_broadcast() &&
+	    !cpumask_empty(mm_cpumask(next)) &&
 	    !cpumask_test_cpu(cpu, mm_cpumask(next)))
 		__flush_icache_all();
-#endif
+
 	if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) {
 		check_and_switch_context(next, tsk);
 		if (cache_is_vivt())
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 6220e9fdf4c7..f8b8965666e9 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -97,19 +97,22 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
+	unsigned long contended, res;
 	u32 slock;
 
-	__asm__ __volatile__(
-"	ldrex	%0, [%2]\n"
-"	subs	%1, %0, %0, ror #16\n"
-"	addeq	%0, %0, %3\n"
-"	strexeq	%1, %0, [%2]"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
-	: "cc");
-
-	if (tmp == 0) {
+	do {
+		__asm__ __volatile__(
+		"	ldrex	%0, [%3]\n"
+		"	mov	%2, #0\n"
+		"	subs	%1, %0, %0, ror #16\n"
+		"	addeq	%0, %0, %4\n"
+		"	strexeq	%2, %0, [%3]"
+		: "=&r" (slock), "=&r" (contended), "=r" (res)
+		: "r" (&lock->slock), "I" (1 << TICKET_SHIFT)
+		: "cc");
+	} while (res);
+
+	if (!contended) {
 		smp_mb();
 		return 1;
 	} else {
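The trylock rewrite above separates two failure modes: a genuinely contended lock (report failure) and a spurious strex failure (retry). A C11 sketch of the same ticket-trylock semantics, assuming the 3.x ARM layout of owner in the low halfword and the next ticket in the high halfword (TICKET_SHIFT = 16); the weak compare-exchange plays the role of ldrex/strex and may likewise fail spuriously:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct ticket_lock { _Atomic uint32_t slock; };	/* next:16 | owner:16 */

static bool ticket_trylock(struct ticket_lock *l)
{
	uint32_t old = atomic_load_explicit(&l->slock, memory_order_relaxed);

	for (;;) {
		if ((old & 0xffff) != (old >> 16))
			return false;		/* genuinely contended */
		/* take a ticket: next++, i.e. add 1 << TICKET_SHIFT */
		if (atomic_compare_exchange_weak_explicit(&l->slock, &old,
				old + (1u << 16), memory_order_acquire,
				memory_order_relaxed))
			return true;
		/* spurious or racing failure: old was reloaded, loop */
	}
}

As in the patched assembly, a spurious store failure is never reported as contention; the lock word is re-read and the decision is made again.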
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 1995d1a84060..214d4158089a 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -58,7 +58,7 @@ struct thread_info {
 	struct cpu_context_save	cpu_context;	/* cpu context */
 	__u32			syscall;	/* syscall number */
 	__u8			used_cp[16];	/* thread used copro */
-	unsigned long		tp_value;
+	unsigned long		tp_value[2];	/* TLS registers */
 #ifdef CONFIG_CRUNCH
 	struct crunch_state	crunchstate;
 #endif
diff --git a/arch/arm/include/asm/tls.h b/arch/arm/include/asm/tls.h
index 73409e6c0251..83259b873333 100644
--- a/arch/arm/include/asm/tls.h
+++ b/arch/arm/include/asm/tls.h
@@ -2,27 +2,30 @@
 #define __ASMARM_TLS_H
 
 #ifdef __ASSEMBLY__
-	.macro set_tls_none, tp, tmp1, tmp2
+#include <asm/asm-offsets.h>
+	.macro switch_tls_none, base, tp, tpuser, tmp1, tmp2
 	.endm
 
-	.macro set_tls_v6k, tp, tmp1, tmp2
+	.macro switch_tls_v6k, base, tp, tpuser, tmp1, tmp2
+	mrc	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
 	mcr	p15, 0, \tp, c13, c0, 3		@ set TLS register
-	mov	\tmp1, #0
-	mcr	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
+	mcr	p15, 0, \tpuser, c13, c0, 2	@ and the user r/w register
+	str	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
 	.endm
 
-	.macro set_tls_v6, tp, tmp1, tmp2
+	.macro switch_tls_v6, base, tp, tpuser, tmp1, tmp2
 	ldr	\tmp1, =elf_hwcap
 	ldr	\tmp1, [\tmp1, #0]
 	mov	\tmp2, #0xffff0fff
 	tst	\tmp1, #HWCAP_TLS		@ hardware TLS available?
-	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
-	movne	\tmp1, #0
-	mcrne	p15, 0, \tmp1, c13, c0, 2	@ clear user r/w TLS register
 	streq	\tp, [\tmp2, #-15]		@ set TLS value at 0xffff0ff0
+	mrcne	p15, 0, \tmp2, c13, c0, 2	@ get the user r/w register
+	mcrne	p15, 0, \tp, c13, c0, 3		@ yes, set TLS register
+	mcrne	p15, 0, \tpuser, c13, c0, 2	@ set user r/w register
+	strne	\tmp2, [\base, #TI_TP_VALUE + 4] @ save it
 	.endm
 
-	.macro set_tls_software, tp, tmp1, tmp2
+	.macro switch_tls_software, base, tp, tpuser, tmp1, tmp2
 	mov	\tmp1, #0xffff0fff
 	str	\tp, [\tmp1, #-15]		@ set TLS value at 0xffff0ff0
 	.endm
@@ -31,19 +34,30 @@
 #ifdef CONFIG_TLS_REG_EMUL
 #define tls_emu		1
 #define has_tls_reg		1
-#define set_tls		set_tls_none
+#define switch_tls	switch_tls_none
 #elif defined(CONFIG_CPU_V6)
 #define tls_emu		0
 #define has_tls_reg		(elf_hwcap & HWCAP_TLS)
-#define set_tls		set_tls_v6
+#define switch_tls	switch_tls_v6
 #elif defined(CONFIG_CPU_32v6K)
 #define tls_emu		0
 #define has_tls_reg		1
-#define set_tls		set_tls_v6k
+#define switch_tls	switch_tls_v6k
 #else
 #define tls_emu		0
 #define has_tls_reg		0
-#define set_tls		set_tls_software
+#define switch_tls	switch_tls_software
 #endif
 
+#ifndef __ASSEMBLY__
+static inline unsigned long get_tpuser(void)
+{
+	unsigned long reg = 0;
+
+	if (has_tls_reg && !tls_emu)
+		__asm__("mrc p15, 0, %0, c13, c0, 2" : "=r" (reg));
+
+	return reg;
+}
+#endif
 #endif	/* __ASMARM_TLS_H */
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 582b405befc5..a39cfc2a1f90 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -685,15 +685,16 @@ ENTRY(__switch_to)
  UNWIND(.fnstart	)
  UNWIND(.cantunwind	)
 	add	ip, r1, #TI_CPU_SAVE
-	ldr	r3, [r2, #TI_TP_VALUE]
  ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
  THUMB(	stmia	ip!, {r4 - sl, fp} )		@ Store most regs on stack
  THUMB(	str	sp, [ip], #4 )
  THUMB(	str	lr, [ip], #4 )
+	ldr	r4, [r2, #TI_TP_VALUE]
+	ldr	r5, [r2, #TI_TP_VALUE + 4]
 #ifdef CONFIG_CPU_USE_DOMAINS
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-	set_tls	r3, r4, r5
+	switch_tls r1, r4, r5, r3, r7
 #if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
 	ldr	r7, [r2, #TI_TASK]
 	ldr	r8, =__stack_chk_guard
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index bc5bc0a97131..4bc816a74a2e 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -362,6 +362,16 @@ ENTRY(vector_swi)
 	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
 	zero_fp
 
+#ifdef CONFIG_ALIGNMENT_TRAP
+	ldr	ip, __cr_alignment
+	ldr	ip, [ip]
+	mcr	p15, 0, ip, c1, c0		@ update control register
+#endif
+
+	enable_irq
+	ct_user_exit
+	get_thread_info tsk
+
 	/*
 	 * Get the system call number.
 	 */
@@ -375,9 +385,9 @@ ENTRY(vector_swi)
 #ifdef CONFIG_ARM_THUMB
 	tst	r8, #PSR_T_BIT
 	movne	r10, #0				@ no thumb OABI emulation
-	ldreq	r10, [lr, #-4]			@ get SWI instruction
+ USER(	ldreq	r10, [lr, #-4]		)	@ get SWI instruction
 #else
-	ldr	r10, [lr, #-4]			@ get SWI instruction
+ USER(	ldr	r10, [lr, #-4]		)	@ get SWI instruction
 #endif
 #ifdef CONFIG_CPU_ENDIAN_BE8
 	rev	r10, r10			@ little endian instruction
@@ -392,22 +402,13 @@ ENTRY(vector_swi)
 	/* Legacy ABI only, possibly thumb mode. */
 	tst	r8, #PSR_T_BIT			@ this is SPSR from save_user_regs
 	addne	scno, r7, #__NR_SYSCALL_BASE	@ put OS number in
-	ldreq	scno, [lr, #-4]
+ USER(	ldreq	scno, [lr, #-4]	)
 
 #else
 	/* Legacy ABI only. */
-	ldr	scno, [lr, #-4]			@ get SWI instruction
+ USER(	ldr	scno, [lr, #-4]	)		@ get SWI instruction
 #endif
 
-#ifdef CONFIG_ALIGNMENT_TRAP
-	ldr	ip, __cr_alignment
-	ldr	ip, [ip]
-	mcr	p15, 0, ip, c1, c0		@ update control register
-#endif
-	enable_irq
-	ct_user_exit
-
-	get_thread_info tsk
 	adr	tbl, sys_call_table		@ load syscall table pointer
 
 #if defined(CONFIG_OABI_COMPAT)
@@ -442,6 +443,21 @@ local_restart:
 	eor	r0, scno, #__NR_SYSCALL_BASE	@ put OS number back
 	bcs	arm_syscall
 	b	sys_ni_syscall			@ not private func
+
+#if defined(CONFIG_OABI_COMPAT) || !defined(CONFIG_AEABI)
+	/*
+	 * We failed to handle a fault trying to access the page
+	 * containing the swi instruction, but we're not really in a
+	 * position to return -EFAULT. Instead, return back to the
+	 * instruction and re-enter the user fault handling path trying
+	 * to page it in. This will likely result in sending SEGV to the
+	 * current task.
+	 */
+9001:
+	sub	lr, lr, #4
+	str	lr, [sp, #S_PC]
+	b	ret_fast_syscall
+#endif
 ENDPROC(vector_swi)
 
 	/*
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..d9f5cd4e533f 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -569,6 +569,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
 		return;
 	}
 
+	perf_callchain_store(entry, regs->ARM_pc);
 	tail = (struct frame_tail __user *)regs->ARM_fp - 1;
 
 	while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 6e8931ccf13e..7f1efcd4a6e9 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -39,6 +39,7 @@
 #include <asm/thread_notify.h>
 #include <asm/stacktrace.h>
 #include <asm/mach/time.h>
+#include <asm/tls.h>
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 #include <linux/stackprotector.h>
@@ -374,7 +375,8 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
 	clear_ptrace_hw_breakpoint(p);
 
 	if (clone_flags & CLONE_SETTLS)
-		thread->tp_value = childregs->ARM_r3;
+		thread->tp_value[0] = childregs->ARM_r3;
+	thread->tp_value[1] = get_tpuser();
 
 	thread_notify(THREAD_NOTIFY_COPY, thread);
 
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 03deeffd9f6d..2bc1514d6dbe 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -849,7 +849,7 @@ long arch_ptrace(struct task_struct *child, long request,
 #endif
 
 	case PTRACE_GET_THREAD_AREA:
-		ret = put_user(task_thread_info(child)->tp_value,
+		ret = put_user(task_thread_info(child)->tp_value[0],
 			       datap);
 		break;
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b4b1d397592b..0cde326f5542 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -456,6 +456,13 @@ void __init smp_setup_processor_id(void)
 	for (i = 1; i < nr_cpu_ids; ++i)
 		cpu_logical_map(i) = i == cpu ? 0 : i;
 
+	/*
+	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
+	 * using percpu variable early, for example, lockdep will
+	 * access percpu variable inside lock_release
+	 */
+	set_my_cpu_offset(0);
+
 	printk(KERN_INFO "Booting Linux on physical CPU 0x%x\n", mpidr);
 }
 
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 9a52a07aa40e..a98b62dca2fa 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -103,7 +103,7 @@ static void broadcast_tlb_a15_erratum(void)
 
 static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 {
-	int cpu, this_cpu;
+	int this_cpu;
 	cpumask_t mask = { CPU_BITS_NONE };
 
 	if (!erratum_a15_798181())
@@ -111,21 +111,7 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 
 	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
-	for_each_online_cpu(cpu) {
-		if (cpu == this_cpu)
-			continue;
-		/*
-		 * We only need to send an IPI if the other CPUs are running
-		 * the same ASID as the one being invalidated. There is no
-		 * need for locking around the active_asids check since the
-		 * switch_mm() function has at least one dmb() (as required by
-		 * this workaround) in case a context switch happens on
-		 * another CPU after the condition below.
-		 */
-		if (atomic64_read(&mm->context.id) ==
-		    atomic64_read(&per_cpu(active_asids, cpu)))
-			cpumask_set_cpu(cpu, &mask);
-	}
+	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
 	put_cpu();
 }
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 18b32e8e4497..517bfd4da1c9 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -581,7 +581,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 		return regs->ARM_r0;
 
 	case NR(set_tls):
-		thread->tp_value = regs->ARM_r0;
+		thread->tp_value[0] = regs->ARM_r0;
 		if (tls_emu)
 			return 0;
 		if (has_tls_reg) {
@@ -699,7 +699,7 @@ static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
 	int reg = (instr >> 12) & 15;
 	if (reg == 15)
 		return 1;
-	regs->uregs[reg] = current_thread_info()->tp_value;
+	regs->uregs[reg] = current_thread_info()->tp_value[0];
 	regs->ARM_pc += 4;
 	return 0;
 }
diff --git a/arch/arm/mach-ebsa110/core.c b/arch/arm/mach-ebsa110/core.c
index b13cc74114db..8a53f346cdb3 100644
--- a/arch/arm/mach-ebsa110/core.c
+++ b/arch/arm/mach-ebsa110/core.c
@@ -116,7 +116,7 @@ static void __init ebsa110_map_io(void)
 	iotable_init(ebsa110_io_desc, ARRAY_SIZE(ebsa110_io_desc));
 }
 
-static void __iomem *ebsa110_ioremap_caller(unsigned long cookie, size_t size,
+static void __iomem *ebsa110_ioremap_caller(phys_addr_t cookie, size_t size,
 					    unsigned int flags, void *caller)
 {
 	return (void __iomem *)cookie;
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index e0e69a682174..eed32ca0b8ab 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -65,7 +65,7 @@ static void imx3_idle(void)
 		: "=r" (reg));
 }
 
-static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
+static void __iomem *imx3_ioremap_caller(phys_addr_t phys_addr, size_t size,
 					 unsigned int mtype, void *caller)
 {
 	if (mtype == MT_DEVICE) {
diff --git a/arch/arm/mach-iop13xx/io.c b/arch/arm/mach-iop13xx/io.c
index 183dc8b5511b..faaf7d4482c5 100644
--- a/arch/arm/mach-iop13xx/io.c
+++ b/arch/arm/mach-iop13xx/io.c
@@ -23,7 +23,7 @@
 
 #include "pci.h"
 
-static void __iomem *__iop13xx_ioremap_caller(unsigned long cookie,
+static void __iomem *__iop13xx_ioremap_caller(phys_addr_t cookie,
 	size_t size, unsigned int mtype, void *caller)
 {
 	void __iomem * retval;
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 6600cff6bd92..d7223b3b81f3 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -559,7 +559,7 @@ void ixp4xx_restart(char mode, const char *cmd)
  * fallback to the default.
  */
 
-static void __iomem *ixp4xx_ioremap_caller(unsigned long addr, size_t size,
+static void __iomem *ixp4xx_ioremap_caller(phys_addr_t addr, size_t size,
 					   unsigned int mtype, void *caller)
 {
 	if (!is_pci_memory(addr))
diff --git a/arch/arm/mach-msm/common.h b/arch/arm/mach-msm/common.h
index ce8215a269e5..421cf7751a80 100644
--- a/arch/arm/mach-msm/common.h
+++ b/arch/arm/mach-msm/common.h
@@ -23,7 +23,7 @@ extern void msm_map_msm8x60_io(void);
 extern void msm_map_msm8960_io(void);
 extern void msm_map_qsd8x50_io(void);
 
-extern void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+extern void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
 					  unsigned int mtype, void *caller);
 
 extern struct smp_operations msm_smp_ops;
diff --git a/arch/arm/mach-msm/io.c b/arch/arm/mach-msm/io.c
index 123ef9cbce1b..fd65b6d42cde 100644
--- a/arch/arm/mach-msm/io.c
+++ b/arch/arm/mach-msm/io.c
@@ -172,7 +172,7 @@ void __init msm_map_msm7x30_io(void)
 }
 #endif /* CONFIG_ARCH_MSM7X30 */
 
-void __iomem *__msm_ioremap_caller(unsigned long phys_addr, size_t size,
+void __iomem *__msm_ioremap_caller(phys_addr_t phys_addr, size_t size,
 				   unsigned int mtype, void *caller)
 {
 	if (mtype == MT_DEVICE) {
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index f49cd51e162a..8620ab52a4de 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -4,6 +4,7 @@ config ARCH_OMAP
 config ARCH_OMAP2PLUS
 	bool "TI OMAP2/3/4/5 SoCs with device tree support" if (ARCH_MULTI_V6 || ARCH_MULTI_V7)
 	select ARCH_HAS_CPUFREQ
+	select ARCH_HAS_BANDGAP
 	select ARCH_HAS_HOLES_MEMORYMODEL
 	select ARCH_OMAP
 	select ARCH_REQUIRE_GPIOLIB
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index c465faca51b0..d70e0aba0c9d 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -523,6 +523,147 @@ static void aurora_flush_range(unsigned long start, unsigned long end)
 	}
 }
 
+/*
+ * For certain Broadcom SoCs, depending on the address range, different offsets
+ * need to be added to the address before passing it to L2 for
+ * invalidation/clean/flush
+ *
+ * Section Address Range              Offset        EMI
+ *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
+ *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
+ *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
+ *
+ * When the start and end addresses have crossed two different sections, we
+ * need to break the L2 operation into two, each within its own section.
+ * For example, if we need to invalidate addresses starts at 0xBFFF0000 and
+ * ends at 0xC0001000, we need do invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
+ * 0xC0000000 - 0xC0001000
+ *
+ * Note 1:
+ * By breaking a single L2 operation into two, we may potentially suffer some
+ * performance hit, but keep in mind the cross section case is very rare
+ *
+ * Note 2:
+ * We do not need to handle the case when the start address is in
+ * Section 1 and the end address is in Section 3, since it is not a valid use
+ * case
+ *
+ * Note 3:
+ * Section 1 in practical terms can no longer be used on rev A2. Because of
+ * that the code does not need to handle section 1 at all.
+ *
+ */
+#define BCM_SYS_EMI_START_ADDR        0x40000000UL
+#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL
+
+#define BCM_SYS_EMI_OFFSET            0x40000000UL
+#define BCM_VC_EMI_OFFSET             0x80000000UL
+
+static inline int bcm_addr_is_sys_emi(unsigned long addr)
+{
+	return (addr >= BCM_SYS_EMI_START_ADDR) &&
+		(addr < BCM_VC_EMI_SEC3_START_ADDR);
+}
+
+static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
+{
+	if (bcm_addr_is_sys_emi(addr))
+		return addr + BCM_SYS_EMI_OFFSET;
+	else
+		return addr + BCM_VC_EMI_OFFSET;
+}
+
+static void bcm_inv_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_inv_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_inv_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
+static void bcm_clean_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_clean_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_clean_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
+static void bcm_flush_range(unsigned long start, unsigned long end)
+{
+	unsigned long new_start, new_end;
+
+	BUG_ON(start < BCM_SYS_EMI_START_ADDR);
+
+	if (unlikely(end <= start))
+		return;
+
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
+	new_start = bcm_l2_phys_addr(start);
+	new_end = bcm_l2_phys_addr(end);
+
+	/* normal case, no cross section between start and end */
+	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
+		l2x0_flush_range(new_start, new_end);
+		return;
+	}
+
+	/* They cross sections, so it can only be a cross from section
+	 * 2 to section 3
+	 */
+	l2x0_flush_range(new_start,
+		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
+	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
+		new_end);
+}
+
 static void __init l2x0_of_setup(const struct device_node *np,
 				 u32 *aux_val, u32 *aux_mask)
 {
@@ -765,6 +906,21 @@ static const struct l2x0_of_data aurora_no_outer_data = {
 	},
 };
 
+static const struct l2x0_of_data bcm_l2x0_data = {
+	.setup = pl310_of_setup,
+	.save  = pl310_save,
+	.outer_cache = {
+		.resume      = pl310_resume,
+		.inv_range   = bcm_inv_range,
+		.clean_range = bcm_clean_range,
+		.flush_range = bcm_flush_range,
+		.sync        = l2x0_cache_sync,
+		.flush_all   = l2x0_flush_all,
+		.inv_all     = l2x0_inv_all,
+		.disable     = l2x0_disable,
+	},
+};
+
 static const struct of_device_id l2x0_ids[] __initconst = {
 	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
 	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
@@ -773,6 +929,8 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	  .data = (void *)&aurora_no_outer_data},
 	{ .compatible = "marvell,aurora-outer-cache",
 	  .data = (void *)&aurora_with_outer_data},
+	{ .compatible = "bcm,bcm11351-a2-pl310-cache",
+	  .data = (void *)&bcm_l2x0_data},
 	{}
 };
 
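A self-contained restatement of the bcm_l2_phys_addr() remapping above with spot checks; uint32_t is used so the section-3 wrap-around is explicit, matching the SoC's 32-bit physical address space (values taken from the table in the comment):

#include <assert.h>
#include <stdint.h>

#define SYS_EMI_START	0x40000000u	/* section 2 */
#define VC_SEC3_START	0xC0000000u	/* section 3 */
#define SYS_EMI_OFFSET	0x40000000u
#define VC_EMI_OFFSET	0x80000000u

static uint32_t bcm_remap(uint32_t addr)
{
	if (addr >= SYS_EMI_START && addr < VC_SEC3_START)
		return addr + SYS_EMI_OFFSET;	/* SYS EMI */
	return addr + VC_EMI_OFFSET;		/* VC EMI */
}

int main(void)
{
	/* section 2: 0x40000000 + 0x40000000 */
	assert(bcm_remap(0x40000000u) == 0x80000000u);
	/* section 3: 0xC0000000 + 0x80000000 wraps to 0x40000000 */
	assert(bcm_remap(0xC0000000u) == 0x40000000u);
	/* a range such as 0xBFFF0000-0xC0001000 straddles the boundary,
	 * so the range ops above split it at 0xC0000000 */
	return 0;
}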
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 2ac37372ef52..eeab06ebd06e 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -39,19 +39,43 @@
  * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
-#define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
-
-#define ASID_TO_IDX(asid)	((asid & ~ASID_MASK) - 1)
-#define IDX_TO_ASID(idx)	((idx + 1) & ~ASID_MASK)
+#define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
 static DEFINE_PER_CPU(u64, reserved_asids);
 static cpumask_t tlb_flush_pending;
 
+#ifdef CONFIG_ARM_ERRATA_798181
+void a15_erratum_get_cpumask(int this_cpu, struct mm_struct *mm,
+			     cpumask_t *mask)
+{
+	int cpu;
+	unsigned long flags;
+	u64 context_id, asid;
+
+	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
+	context_id = mm->context.id.counter;
+	for_each_online_cpu(cpu) {
+		if (cpu == this_cpu)
+			continue;
+		/*
+		 * We only need to send an IPI if the other CPUs are
+		 * running the same ASID as the one being invalidated.
+		 */
+		asid = per_cpu(active_asids, cpu).counter;
+		if (asid == 0)
+			asid = per_cpu(reserved_asids, cpu);
+		if (context_id == asid)
+			cpumask_set_cpu(cpu, mask);
+	}
+	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
+}
+#endif
+
 #ifdef CONFIG_ARM_LPAE
 static void cpu_set_reserved_ttbr0(void)
 {
@@ -128,7 +152,16 @@ static void flush_context(unsigned int cpu)
 			asid = 0;
 		} else {
 			asid = atomic64_xchg(&per_cpu(active_asids, i), 0);
-			__set_bit(ASID_TO_IDX(asid), asid_map);
+			/*
+			 * If this CPU has already been through a
+			 * rollover, but hasn't run another task in
+			 * the meantime, we must preserve its reserved
+			 * ASID, as this is the only trace we have of
+			 * the process it is still running.
+			 */
+			if (asid == 0)
+				asid = per_cpu(reserved_asids, i);
+			__set_bit(asid & ~ASID_MASK, asid_map);
 		}
 		per_cpu(reserved_asids, i) = asid;
 	}
@@ -167,17 +200,19 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
 		/*
 		 * Allocate a free ASID. If we can't find one, take a
 		 * note of the currently active ASIDs and mark the TLBs
-		 * as requiring flushes.
+		 * as requiring flushes. We always count from ASID #1,
+		 * as we reserve ASID #0 to switch via TTBR0 and indicate
+		 * rollover events.
 		 */
-		asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+		asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		if (asid == NUM_USER_ASIDS) {
 			generation = atomic64_add_return(ASID_FIRST_VERSION,
 							 &asid_generation);
 			flush_context(cpu);
-			asid = find_first_zero_bit(asid_map, NUM_USER_ASIDS);
+			asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
 		}
 		__set_bit(asid, asid_map);
-		asid = generation | IDX_TO_ASID(asid);
+		asid |= generation;
 		cpumask_clear(mm_cpumask(mm));
 	}
 
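The reworked allocator above packs a rollover generation in the bits above ASID_BITS and never hands out ASID #0, so the bitmap index and the hardware ASID now coincide. A small sketch of the packing (ASID_BITS = 8 matches the 8-bit hardware ASID this file uses; the sample numbers are arbitrary):

#include <assert.h>
#include <stdint.h>

#define ASID_BITS		8
#define ASID_MASK		(~0ULL << ASID_BITS)
#define ASID_FIRST_VERSION	(1ULL << ASID_BITS)

int main(void)
{
	uint64_t generation = 3 * ASID_FIRST_VERSION;	/* third rollover */
	uint64_t asid = 42;	/* bitmap index; bit 0 is never allocated */

	uint64_t context_id = asid | generation;	/* "asid |= generation" */
	assert((context_id & ~ASID_MASK) == 42);	/* hardware ASID */
	assert((context_id & ASID_MASK) == generation);	/* version compare */
	return 0;
}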
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ef3e0f3aac96..c038ec0738ac 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -880,10 +880,24 @@ static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 		dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
 
 	/*
-	 * Mark the D-cache clean for this page to avoid extra flushing.
+	 * Mark the D-cache clean for these pages to avoid extra flushing.
 	 */
-	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
-		set_bit(PG_dcache_clean, &page->flags);
+	if (dir != DMA_TO_DEVICE && size >= PAGE_SIZE) {
+		unsigned long pfn;
+		size_t left = size;
+
+		pfn = page_to_pfn(page) + off / PAGE_SIZE;
+		off %= PAGE_SIZE;
+		if (off) {
+			pfn++;
+			left -= PAGE_SIZE - off;
+		}
+		while (left >= PAGE_SIZE) {
+			page = pfn_to_page(pfn++);
+			set_bit(PG_dcache_clean, &page->flags);
+			left -= PAGE_SIZE;
+		}
+	}
 }
 
 /**
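The rewritten loop above marks PG_dcache_clean only on pages that the DMA buffer covers entirely. A worked example of the arithmetic with made-up numbers (a buffer starting 512 bytes into its first page and running for three page-sizes):

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long pfn = 100;	/* pretend page_to_pfn(page) */
	unsigned long off = 512;
	unsigned long left = 3 * PAGE_SIZE;	/* size */

	pfn += off / PAGE_SIZE;
	off %= PAGE_SIZE;
	if (off) {			/* first page only partly covered */
		pfn++;
		left -= PAGE_SIZE - off;
	}
	while (left >= PAGE_SIZE) {	/* prints pfn 101 and 102 */
		printf("mark pfn %lu clean\n", pfn++);
		left -= PAGE_SIZE;
	}
	/* the 512 trailing bytes on pfn 103 leave that page unmarked */
	return 0;
}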
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 32aa5861119f..e4ac5d8278e1 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -287,7 +287,7 @@ void flush_dcache_page(struct page *page)
287 mapping = page_mapping(page); 287 mapping = page_mapping(page);
288 288
289 if (!cache_ops_need_broadcast() && 289 if (!cache_ops_need_broadcast() &&
290 mapping && !mapping_mapped(mapping)) 290 mapping && !page_mapped(page))
291 clear_bit(PG_dcache_clean, &page->flags); 291 clear_bit(PG_dcache_clean, &page->flags);
292 else { 292 else {
293 __flush_dcache_page(mapping, page); 293 __flush_dcache_page(mapping, page);
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 04d9006eab1f..f123d6eb074b 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -331,10 +331,10 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
331 return (void __iomem *) (offset + addr); 331 return (void __iomem *) (offset + addr);
332} 332}
333 333
334void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, 334void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
335 unsigned int mtype, void *caller) 335 unsigned int mtype, void *caller)
336{ 336{
337 unsigned long last_addr; 337 phys_addr_t last_addr;
338 unsigned long offset = phys_addr & ~PAGE_MASK; 338 unsigned long offset = phys_addr & ~PAGE_MASK;
339 unsigned long pfn = __phys_to_pfn(phys_addr); 339 unsigned long pfn = __phys_to_pfn(phys_addr);
340 340
@@ -367,12 +367,12 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
 }
 EXPORT_SYMBOL(__arm_ioremap_pfn);
 
-void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
+void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
 				      unsigned int, void *) =
 	__arm_ioremap_caller;
 
 void __iomem *
-__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
+__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
 {
 	return arch_ioremap_caller(phys_addr, size, mtype,
 		__builtin_return_address(0));
@@ -387,7 +387,7 @@ EXPORT_SYMBOL(__arm_ioremap);
  * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
  */
 void __iomem *
-__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
 {
 	unsigned int mtype;
 
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index eb5293a69a84..7fe0524a5449 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -87,16 +87,16 @@ void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset,
87 return __arm_ioremap_pfn(pfn, offset, size, mtype); 87 return __arm_ioremap_pfn(pfn, offset, size, mtype);
88} 88}
89 89
90void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, 90void __iomem *__arm_ioremap(phys_addr_t phys_addr, size_t size,
91 unsigned int mtype) 91 unsigned int mtype)
92{ 92{
93 return (void __iomem *)phys_addr; 93 return (void __iomem *)phys_addr;
94} 94}
95EXPORT_SYMBOL(__arm_ioremap); 95EXPORT_SYMBOL(__arm_ioremap);
96 96
97void __iomem * (*arch_ioremap_caller)(unsigned long, size_t, unsigned int, void *); 97void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);
98 98
99void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, 99void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
100 unsigned int mtype, void *caller) 100 unsigned int mtype, void *caller)
101{ 101{
102 return __arm_ioremap(phys_addr, size, mtype); 102 return __arm_ioremap(phys_addr, size, mtype);
diff --git a/arch/arm/plat-versatile/headsmp.S b/arch/arm/plat-versatile/headsmp.S
index b178d44e9eaa..2677bc3762d7 100644
--- a/arch/arm/plat-versatile/headsmp.S
+++ b/arch/arm/plat-versatile/headsmp.S
@@ -11,8 +11,6 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
-	__INIT
-
 /*
  * Realview/Versatile Express specific entry point for secondary CPUs.
  * This provides a "holding pen" into which all secondary cores are held