 arch/arm/Kconfig                        |  6
 arch/arm/Kconfig.debug                  | 41
 arch/arm/common/mcpm_entry.c            | 15
 arch/arm/common/mcpm_platsmp.c          | 27
 arch/arm/common/timer-sp.c              |  2
 arch/arm/include/asm/atomic.h           | 76
 arch/arm/include/asm/cacheflush.h       | 46
 arch/arm/include/asm/cmpxchg.h          | 58
 arch/arm/include/asm/cputype.h          |  1
 arch/arm/include/asm/hardirq.h          |  2
 arch/arm/include/asm/mcpm.h             | 31
 arch/arm/include/asm/pgtable-2level.h   |  7
 arch/arm/include/asm/pgtable-3level.h   |  3
 arch/arm/include/asm/setup.h            |  2
 arch/arm/include/asm/spinlock.h         |  8
 arch/arm/include/asm/tlbflush.h         | 48
 arch/arm/include/debug/efm32.S          | 45
 arch/arm/include/debug/msm.S            |  5
 arch/arm/kernel/hw_breakpoint.c         | 14
 arch/arm/kernel/kprobes.c               |  8
 arch/arm/kernel/perf_event_cpu.c        |  2
 arch/arm/kernel/setup.c                 | 24
 arch/arm/kernel/smp.c                   | 19
 arch/arm/kernel/smp_tlb.c               | 36
 arch/arm/kvm/arm.c                      |  6
 arch/arm/lib/uaccess_with_memcpy.c      | 41
 arch/arm/mach-footbridge/netwinder-hw.c |  8
 arch/arm/mach-vexpress/dcscb.c          | 56
 arch/arm/mach-vexpress/tc2_pm.c         | 48
 arch/arm/mm/mmap.c                      |  6
 arch/arm64/include/asm/atomic.h         | 14
 arch/arm64/kernel/debug-monitors.c      | 13
 arch/arm64/kernel/hw_breakpoint.c       | 22
 arch/arm64/kernel/perf_event.c          |  4
 include/linux/amba/bus.h                |  2
 35 files changed, 467 insertions(+), 279 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1ad6fb6c094d..df0c609272e5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS
@@ -1091,11 +1092,6 @@ config IWMMXT
 	  Enable support for iWMMXt context switching at run time if
 	  running on a CPU that supports it.

-config XSCALE_PMU
-	bool
-	depends on CPU_XSCALE
-	default y
-
 config MULTI_IRQ_HANDLER
 	bool
 	help
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 9762c84b4198..a8f305b07f29 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -318,6 +318,7 @@ choice
 	config DEBUG_MSM_UART1
 		bool "Kernel low-level debugging messages via MSM UART1"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the first serial port on MSM devices.
@@ -325,6 +326,7 @@ choice
 	config DEBUG_MSM_UART2
 		bool "Kernel low-level debugging messages via MSM UART2"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the second serial port on MSM devices.
@@ -332,6 +334,7 @@ choice
 	config DEBUG_MSM_UART3
 		bool "Kernel low-level debugging messages via MSM UART3"
 		depends on ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the third serial port on MSM devices.
@@ -340,6 +343,7 @@ choice
 		bool "Kernel low-level debugging messages via MSM 8660 UART"
 		depends on ARCH_MSM8X60
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8660 devices.
@@ -348,10 +352,20 @@ choice
 		bool "Kernel low-level debugging messages via MSM 8960 UART"
 		depends on ARCH_MSM8960
 		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
 		help
 		  Say Y here if you want the debug print routines to direct
 		  their output to the serial port on MSM 8960 devices.

+	config DEBUG_MSM8974_UART
+		bool "Kernel low-level debugging messages via MSM 8974 UART"
+		depends on ARCH_MSM8974
+		select MSM_HAS_DEBUG_UART_HS
+		select DEBUG_MSM_UART
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to the serial port on MSM 8974 devices.
+
 	config DEBUG_MVEBU_UART
 		bool "Kernel low-level debugging messages via MVEBU UART (old bootloaders)"
 		depends on ARCH_MVEBU
@@ -834,6 +848,20 @@ choice
 	  options; the platform specific options are deprecated
 	  and will soon be removed.

+	config DEBUG_LL_UART_EFM32
+		bool "Kernel low-level debugging via efm32 UART"
+		depends on ARCH_EFM32
+		help
+		  Say Y here if you want the debug print routines to direct
+		  their output to a UART or USART port on efm32 based
+		  machines. Use the following addresses for DEBUG_UART_PHYS:
+
+		    0x4000c000 | USART0
+		    0x4000c400 | USART1
+		    0x4000c800 | USART2
+		    0x4000e000 | UART0
+		    0x4000e400 | UART1
+
 	config DEBUG_LL_UART_PL01X
 		bool "Kernel low-level debugging via ARM Ltd PL01x Primecell UART"
 		help
@@ -880,11 +908,16 @@ config DEBUG_STI_UART
 	bool
 	depends on ARCH_STI

+config DEBUG_MSM_UART
+	bool
+	depends on ARCH_MSM
+
 config DEBUG_LL_INCLUDE
 	string
 	default "debug/8250.S" if DEBUG_LL_UART_8250 || DEBUG_UART_8250
 	default "debug/pl01x.S" if DEBUG_LL_UART_PL01X || DEBUG_UART_PL01X
 	default "debug/exynos.S" if DEBUG_EXYNOS_UART
+	default "debug/efm32.S" if DEBUG_LL_UART_EFM32
 	default "debug/icedcc.S" if DEBUG_ICEDCC
 	default "debug/imx.S" if DEBUG_IMX1_UART || \
 				 DEBUG_IMX25_UART || \
@@ -895,11 +928,7 @@ config DEBUG_LL_INCLUDE
 				 DEBUG_IMX53_UART || \
 				 DEBUG_IMX6Q_UART || \
 				 DEBUG_IMX6SL_UART
-	default "debug/msm.S" if DEBUG_MSM_UART1 || \
-				 DEBUG_MSM_UART2 || \
-				 DEBUG_MSM_UART3 || \
-				 DEBUG_MSM8660_UART || \
-				 DEBUG_MSM8960_UART
+	default "debug/msm.S" if DEBUG_MSM_UART
 	default "debug/omap2plus.S" if DEBUG_OMAP2PLUS_UART
 	default "debug/sirf.S" if DEBUG_SIRFPRIMA2_UART1 || DEBUG_SIRFMARCO_UART1
 	default "debug/sti.S" if DEBUG_STI_UART
@@ -951,6 +980,7 @@ config DEBUG_UART_PHYS
 	default 0x20064000 if DEBUG_RK29_UART1 || DEBUG_RK3X_UART2
 	default 0x20068000 if DEBUG_RK29_UART2 || DEBUG_RK3X_UART3
 	default 0x20201000 if DEBUG_BCM2835
+	default 0x4000e400 if DEBUG_LL_UART_EFM32
 	default 0x40090000 if ARCH_LPC32XX
 	default 0x40100000 if DEBUG_PXA_UART1
 	default 0x42000000 if ARCH_GEMINI
@@ -981,6 +1011,7 @@ config DEBUG_UART_PHYS
 	default 0xfff36000 if DEBUG_HIGHBANK_UART
 	default 0xfffff700 if ARCH_IOP33X
 	depends on DEBUG_LL_UART_8250 || DEBUG_LL_UART_PL01X || \
+		DEBUG_LL_UART_EFM32 || \
 		DEBUG_UART_8250 || DEBUG_UART_PL01X

config DEBUG_UART_VIRT
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 990250965f2c..6c03d0152e7f 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -90,6 +90,21 @@ void mcpm_cpu_power_down(void)
 	BUG();
 }

+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+{
+	int ret;
+
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+		return -EUNATCH;
+
+	ret = platform_ops->power_down_finish(cpu, cluster);
+	if (ret)
+		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
+			__func__, cpu, cluster, ret);
+
+	return ret;
+}
+
 void mcpm_cpu_suspend(u64 expected_residency)
 {
 	phys_reset_t phys_reset;
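[The new hook simply delegates to the platform backend. For reference, a backend's power_down_finish() implementation typically polls its power controller until the CPU is observed to be down. A minimal sketch, assuming an invented foo_pwrc_cpu_is_off() helper; this is not taken from the patch:

static int foo_power_down_finish(unsigned int cpu, unsigned int cluster)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(1000);

	do {
		/* foo_pwrc_cpu_is_off() is a hypothetical platform helper */
		if (foo_pwrc_cpu_is_off(cpu, cluster))
			return 0;	/* CPU safely parked */
		cpu_relax();
	} while (time_before(jiffies, timeout));

	return -ETIMEDOUT;	/* mcpm_cpu_power_down_finish() will pr_warn() */
}]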
diff --git a/arch/arm/common/mcpm_platsmp.c b/arch/arm/common/mcpm_platsmp.c
index 1bc34c7567fd..177251a4dd9a 100644
--- a/arch/arm/common/mcpm_platsmp.c
+++ b/arch/arm/common/mcpm_platsmp.c
@@ -19,14 +19,23 @@
 #include <asm/smp.h>
 #include <asm/smp_plat.h>

+static void cpu_to_pcpu(unsigned int cpu,
+			unsigned int *pcpu, unsigned int *pcluster)
+{
+	unsigned int mpidr;
+
+	mpidr = cpu_logical_map(cpu);
+	*pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+	*pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+}
+
 static int mcpm_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
-	unsigned int mpidr, pcpu, pcluster, ret;
+	unsigned int pcpu, pcluster, ret;
 	extern void secondary_startup(void);

-	mpidr = cpu_logical_map(cpu);
-	pcpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
-	pcluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
 	pr_debug("%s: logical CPU %d is physical CPU %d cluster %d\n",
 		 __func__, cpu, pcpu, pcluster);

@@ -47,6 +56,15 @@ static void mcpm_secondary_init(unsigned int cpu)

 #ifdef CONFIG_HOTPLUG_CPU

+static int mcpm_cpu_kill(unsigned int cpu)
+{
+	unsigned int pcpu, pcluster;
+
+	cpu_to_pcpu(cpu, &pcpu, &pcluster);
+
+	return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+}
+
 static int mcpm_cpu_disable(unsigned int cpu)
 {
 	/*
@@ -73,6 +91,7 @@ static struct smp_operations __initdata mcpm_smp_ops = {
 	.smp_boot_secondary	= mcpm_boot_secondary,
 	.smp_secondary_init	= mcpm_secondary_init,
 #ifdef CONFIG_HOTPLUG_CPU
+	.cpu_kill		= mcpm_cpu_kill,
 	.cpu_disable		= mcpm_cpu_disable,
 	.cpu_die		= mcpm_cpu_die,
 #endif
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index e901d0f3e0bb..ce922d0ea7aa 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -175,7 +175,7 @@ static struct clock_event_device sp804_clockevent = {

 static struct irqaction sp804_timer_irq = {
 	.name		= "timer",
-	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
 	.handler	= sp804_timer_interrupt,
 	.dev_id		= &sp804_clockevent,
 };
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..f8a4336ed8fc 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)

 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;

 	smp_mb();

@@ -134,21 +135,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }

-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */

 #ifdef CONFIG_SMP
@@ -197,15 +183,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }

-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +215,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)

 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;

 #define ATOMIC64_INIT(i)	{ (i) }

 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +234,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +243,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;

 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,9 +256,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }

-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;

 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
@@ -294,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif

-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	__asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +288,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }

-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();
@@ -334,9 +311,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }

-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	__asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +328,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }

-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();
@@ -374,9 +351,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }

-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					  long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;

 	smp_mb();
@@ -398,9 +376,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }

-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();
@@ -419,9 +397,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }

-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;

 	smp_mb();
@@ -445,9 +423,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }

-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;

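[The atomic64_t API is behaviourally unchanged by the switch from u64 to long long; the types simply line up with the generic 64-bit users. A kernel-style usage sketch (the counter and the 1TB ceiling are illustrative, not from the patch):

static atomic64_t total = ATOMIC64_INIT(0);

static void account(long long n)
{
	long long old, new;

	atomic64_add(n, &total);	/* plain RMW, no return value */

	/* cmpxchg retry loop: clamp the counter to an assumed ceiling */
	do {
		old = atomic64_read(&total);
		new = old > (1LL << 40) ? (1LL << 40) : old;
	} while (atomic64_cmpxchg(&total, old, new) != old);
}]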
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))

+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist.  Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y.  ip is saved as well in case r12-clobbering
+ *   trampolines are inserted by the linker, and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+	asm volatile( \
+	"stmfd	sp!, {fp, ip} \n\t" \
+	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
+	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
+	"isb	\n\t" \
+	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
+	"clrex	\n\t" \
+	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
+	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
+	"isb	\n\t" \
+	"dsb	\n\t" \
+	"ldmfd	sp!, {fp, ip}" \
+	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+	      "r9","r10","lr","memory" )
+
 #endif
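[The level argument selects the flush routine by name: "all" flushes the whole cache hierarchy, "louis" flushes to the Level Of Unification Inner Shareable only. This mirrors the call sites converted later in this series (dcscb.c, tc2_pm.c):

	if (last_man)
		v7_exit_coherency_flush(all);	/* whole cluster going down */
	else
		v7_exit_coherency_flush(louis);	/* only this CPU leaves coherency */]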
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..df2fbba7efc8 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }

+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	unsigned long long oldval;
+	unsigned long res;
+
+	__asm__ __volatile__(
+"1:	ldrexd		%1, %H1, [%3]\n"
+"	teq		%1, %4\n"
+"	teqeq		%H1, %H4\n"
+"	bne		2f\n"
+"	strexd		%0, %5, %H5, [%3]\n"
+"	teq		%0, #0\n"
+"	bne		1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+	: "r" (ptr), "r" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)						\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
 				       (unsigned long)(o),		\
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       sizeof(*(ptr))))

 #define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
-						atomic64_t,		\
-						counter),		\
-					      (unsigned long long)(o),	\
-					      (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
-						local64_t,		\
-						a),			\
-					     (unsigned long long)(o),	\
-					     (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))

 #endif	/* __LINUX_ARM_ARCH__ >= 6 */

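[With this change cmpxchg64() works on any naturally aligned 64-bit location, not just atomic64_t counters. A typical retry loop (sketch; "shared" is an assumed variable, not from the patch):

static unsigned long long shared;

static void add_saturating(unsigned long long n)
{
	unsigned long long old, new;

	do {
		old = shared;		/* a torn read only causes one retry */
		new = (old + n < old) ? ~0ULL : old + n;  /* saturate on wrap */
	} while (cmpxchg64(&shared, old, new) != old);
}]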
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50d..acdde76b39bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE	3
 #define CPUID_MPUIR	4
 #define CPUID_MPIDR	5
+#define CPUID_REVIDR	6

 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0	0x40
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>

-#define NR_IPI	6
+#define NR_IPI	7

 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b69..1cf26010a6f3 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -81,10 +81,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns non-zero for the
+ * specified cpu.  Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
  */
 void mcpm_cpu_power_down(void);

 /**
+ * mcpm_cpu_power_down_finish - for a specified CPU, wait for it to halt and
+ *	make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ *	- zero if the CPU is in a safely parked state
+ *	- nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
+/**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
  * @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +156,7 @@ int mcpm_cpu_powered_up(void);
 struct mcpm_platform_ops {
 	int (*power_up)(unsigned int cpu, unsigned int cluster);
 	void (*power_down)(void);
+	int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
 	void (*suspend)(u64);
 	void (*powered_up)(void);
 };
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..86a659a19526 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)

 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)

+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd)	(0)
+#define pmd_thp_or_huge(pmd)	(0)
+
 #endif /* __ASSEMBLY__ */

 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..39c54cfa03e9 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))

+#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..8d6a089dfb76 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size

-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);

diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ed6c22919e47 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }

+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }

 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
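[arch_spin_value_unlocked() operates on a copied lock value rather than a live pointer, which is what the ARCH_USE_CMPXCHG_LOCKREF select in the arch/arm/Kconfig hunk above relies on: lib/lockref.c speculatively bumps the reference count with cmpxchg64() while the embedded spinlock reads as unlocked. A simplified sketch of that fast path (the real code is the CMPXCHG_LOOP macro in lib/lockref.c):

static int lockref_get_fast(struct lockref *lr)
{
	struct lockref old, new;

	old.lock_count = ACCESS_ONCE(lr->lock_count);
	while (arch_spin_value_unlocked(old.lock.rlock.raw_lock)) {
		new.lock_count = old.lock_count;
		new.count++;
		if (cmpxchg64(&lr->lock_count, old.lock_count,
			      new.lock_count) == old.lock_count)
			return 1;	/* reference taken without the lock */
		old.lock_count = ACCESS_ONCE(lr->lock_count);
	}
	return 0;	/* contended: caller falls back to spin_lock() */
}]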
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040c..def9e570199f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
 	asm("mcr	p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }

-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-	unsigned int midr = read_cpuid_id();
-
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-	/*
-	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-	 */
-	asm("mcr	p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-	return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  *	flush_pmd_entry
  *
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);

 #endif

+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+		erratum_a15_798181_handler))
+		return erratum_a15_798181_handler();
+	return false;
+}
+#endif
+
 #endif
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 000000000000..2265a199280c
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD		0x000c
+#define UARTn_CMD_TXEN		0x0004
+
+#define	UARTn_STATUS		0x0010
+#define	UARTn_STATUS_TXC	0x0020
+#define	UARTn_STATUS_TXBL	0x0040
+
+#define	UARTn_TXDATA		0x0034
+
+		.macro	addruart, rx, tmp
+		ldr	\rx, =(CONFIG_DEBUG_UART_PHYS)
+
+		/*
+		 * enable TX. The driver might disable it to save energy. We
+		 * don't care about disabling at the end as during debug power
+		 * consumption isn't that important.
+		 */
+		ldr	\tmp, =(UARTn_CMD_TXEN)
+		str	\tmp, [\rx, #UARTn_CMD]
+		.endm
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UARTn_TXDATA]
+		.endm
+
+		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXBL
+		beq	1001b
+		.endm
+
+		.macro	busyuart,rd,rx
+1001:		ldr	\rd, [\rx, UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXC
+		bne	1001b
+		.endm
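[For readability, the four macros above amount to a polled transmit. A C sketch of the same flow, illustrative only (real kernel code would use readl/writel, and addruart normally runs once, not per character):

/* mirrors the register offsets #defined at the top of efm32.S */
static void efm32_putc(volatile unsigned char *base, char c)
{
	/* addruart: load the base and (re-)enable the transmitter */
	*(volatile unsigned int *)(base + UARTn_CMD) = UARTn_CMD_TXEN;

	/* waituart: wait until the TX buffer has room (TXBL set) */
	while (!(*(volatile unsigned int *)(base + UARTn_STATUS) &
		 UARTn_STATUS_TXBL))
		;

	/* senduart: write one byte to the data register */
	base[UARTn_TXDATA] = c;

	/* busyuart: poll TXC until the transmission has drained */
	while (!(*(volatile unsigned int *)(base + UARTn_STATUS) &
		 UARTn_STATUS_TXC))
		;
}]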
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470e..9d653d475903 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
 #define MSM_DEBUG_UART_PHYS	0x16440000
 #endif

+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE	0xFA71E000
+#define MSM_DEBUG_UART_PHYS	0xF991E000
+#endif
+
 	.macro	addruart, rp, rv, tmp
 #ifdef MSM_DEBUG_UART_PHYS
 	ldr	\rp, =MSM_DEBUG_UART_PHYS
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 7b95de601357..3d446605cbf8 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -344,13 +344,13 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
 		/* Breakpoint */
 		ctrl_base = ARM_BASE_BCR;
 		val_base = ARM_BASE_BVR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		ctrl_base = ARM_BASE_WCR;
 		val_base = ARM_BASE_WVR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -396,12 +396,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
 	if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
 		/* Breakpoint */
 		base = ARM_BASE_BCR;
-		slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+		slots = this_cpu_ptr(bp_on_reg);
 		max_slots = core_num_brps;
 	} else {
 		/* Watchpoint */
 		base = ARM_BASE_WCR;
-		slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+		slots = this_cpu_ptr(wp_on_reg);
 		max_slots = core_num_wrps;
 	}

@@ -697,7 +697,7 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -768,7 +768,7 @@ static void watchpoint_single_step_handler(unsigned long pc)
 	struct perf_event *wp, **slots;
 	struct arch_hw_breakpoint *info;

-	slots = (struct perf_event **)__get_cpu_var(wp_on_reg);
+	slots = this_cpu_ptr(wp_on_reg);

 	for (i = 0; i < core_num_wrps; ++i) {
 		rcu_read_lock();
@@ -802,7 +802,7 @@ static void breakpoint_handler(unsigned long unknown, struct pt_regs *regs)
 	struct arch_hw_breakpoint *info;
 	struct arch_hw_breakpoint_ctrl ctrl;

-	slots = (struct perf_event **)__get_cpu_var(bp_on_reg);
+	slots = this_cpu_ptr(bp_on_reg);

 	/* The exception entry code places the amended lr in the PC. */
 	addr = regs->ARM_pc;
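[This file and several below mechanically convert the deprecated __get_cpu_var() lvalue idiom to the dedicated per-CPU operations. The mapping, as a sketch on an assumed per-CPU variable:

DEFINE_PER_CPU(struct perf_event *, slot);

static void example(void)
{
	struct perf_event **p = this_cpu_ptr(&slot);	/* was: &__get_cpu_var(slot) */
	struct perf_event *ev = __this_cpu_read(slot);	/* was: __get_cpu_var(slot) */

	__this_cpu_write(slot, ev);	/* was: __get_cpu_var(slot) = ev */
	(void)p;
}]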
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 170e9f34003f..a7b621ece23d 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -171,13 +171,13 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)

 static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
-	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
 	kcb->kprobe_status = kcb->prev_kprobe.status;
 }

 static void __kprobes set_current_kprobe(struct kprobe *p)
 {
-	__get_cpu_var(current_kprobe) = p;
+	__this_cpu_write(current_kprobe, p);
 }

 static void __kprobes
@@ -421,10 +421,10 @@ static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
 			continue;

 		if (ri->rp && ri->rp->handler) {
-			__get_cpu_var(current_kprobe) = &ri->rp->kp;
+			__this_cpu_write(current_kprobe, &ri->rp->kp);
 			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
 			ri->rp->handler(ri, regs);
-			__get_cpu_var(current_kprobe) = NULL;
+			__this_cpu_write(current_kprobe, NULL);
 		}

 		orig_ret_address = (unsigned long)ri->ret_addr;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 8d6147b2001f..d85055cd24ba 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -68,7 +68,7 @@ EXPORT_SYMBOL_GPL(perf_num_counters);

 static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 {
-	return &__get_cpu_var(cpu_hw_events);
+	return this_cpu_ptr(&cpu_hw_events);
 }

 static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 0e1e2b3afa45..53c3901f7ee3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -599,6 +599,8 @@ static void __init setup_processor(void)
 	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif

+	erratum_a15_798181_init();
+
 	feat_v6_fixup();

 	cacheid_init();
@@ -619,9 +621,10 @@ void __init dump_machine_table(void)
 		/* can't use cpu_relax() here as it may require MMU setup */;
 }

-int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
+int __init arm_add_memory(u64 start, u64 size)
 {
 	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
+	u64 aligned_start;

 	if (meminfo.nr_banks >= NR_BANKS) {
 		printk(KERN_CRIT "NR_BANKS too low, "
@@ -634,10 +637,16 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 	 * Size is appropriately rounded down, start is rounded up.
 	 */
 	size -= start & ~PAGE_MASK;
-	bank->start = PAGE_ALIGN(start);
+	aligned_start = PAGE_ALIGN(start);
+
+#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
+	if (aligned_start > ULONG_MAX) {
+		printk(KERN_CRIT "Ignoring memory at 0x%08llx outside "
+		       "32-bit physical address space\n", (long long)start);
+		return -EINVAL;
+	}

-#ifndef CONFIG_ARM_LPAE
-	if (bank->start + size < bank->start) {
+	if (aligned_start + size > ULONG_MAX) {
 		printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
 			"32-bit physical address space\n", (long long)start);
 		/*
@@ -645,10 +654,11 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
 		 * This means we lose a page after masking.
 		 */
-		size = ULONG_MAX - bank->start;
+		size = ULONG_MAX - aligned_start;
 	}
 #endif

+	bank->start = aligned_start;
 	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

 	/*
@@ -669,8 +679,8 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
 static int __init early_mem(char *p)
 {
 	static int usermem __initdata = 0;
-	phys_addr_t size;
-	phys_addr_t start;
+	u64 size;
+	u64 start;
 	char *endp;

 	/*
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 72024ea8a3a6..e115cbb0d25a 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -25,6 +25,7 @@
 #include <linux/clockchips.h>
 #include <linux/completion.h>
 #include <linux/cpufreq.h>
+#include <linux/irq_work.h>

 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -66,6 +67,7 @@ enum ipi_msg_type {
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
+	IPI_IRQ_WORK,
 };

 static DECLARE_COMPLETION(cpu_running);
@@ -448,6 +450,14 @@ void arch_send_call_function_single_ipi(int cpu)
 	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }

+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+	if (is_smp())
+		smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
+}
+#endif
+
 static const char *ipi_types[NR_IPI] = {
 #define S(x,s)	[x] = s
 	S(IPI_WAKEUP, "CPU wakeup interrupts"),
@@ -456,6 +466,7 @@ static const char *ipi_types[NR_IPI] = {
 	S(IPI_CALL_FUNC, "Function call interrupts"),
 	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
 	S(IPI_CPU_STOP, "CPU stop interrupts"),
+	S(IPI_IRQ_WORK, "IRQ work interrupts"),
 };

 void show_ipi_list(struct seq_file *p, int prec)
@@ -565,6 +576,14 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 		irq_exit();
 		break;

+#ifdef CONFIG_IRQ_WORK
+	case IPI_IRQ_WORK:
+		irq_enter();
+		irq_work_run();
+		irq_exit();
+		break;
+#endif
+
 	default:
 		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
 		       cpu, ipinr);
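[With IPI_IRQ_WORK wired up, the generic <linux/irq_work.h> facility works on SMP ARM: a deeply atomic context (e.g. perf NMI-like paths) can queue a callback that runs moments later from the IPI handler above. Standard usage, as a sketch:

#include <linux/irq_work.h>

static void my_irq_work_func(struct irq_work *work)
{
	/* runs in hardirq context, from the IPI_IRQ_WORK case above */
}

static struct irq_work my_work = { .func = my_irq_work_func };

static void poke(void)
{
	irq_work_queue(&my_work);	/* ends up in arch_irq_work_raise() */
}]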
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 83ccca303df8..95d063620b76 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -70,6 +70,40 @@ static inline void ipi_flush_bp_all(void *ignored)
 	local_flush_bp_all();
 }

+#ifdef CONFIG_ARM_ERRATA_798181
+bool (*erratum_a15_798181_handler)(void);
+
+static bool erratum_a15_798181_partial(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return false;
+}
+
+static bool erratum_a15_798181_broadcast(void)
+{
+	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
+	dsb(ish);
+	return true;
+}
+
+void erratum_a15_798181_init(void)
+{
+	unsigned int midr = read_cpuid_id();
+	unsigned int revidr = read_cpuid(CPUID_REVIDR);
+
+	/* Cortex-A15 r0p0..r3p2 w/o ECO fix affected */
+	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2 ||
+	    (revidr & 0x210) == 0x210) {
+		return;
+	}
+	if (revidr & 0x10)
+		erratum_a15_798181_handler = erratum_a15_798181_partial;
+	else
+		erratum_a15_798181_handler = erratum_a15_798181_broadcast;
+}
+#endif
+
 static void ipi_flush_tlb_a15_erratum(void *arg)
 {
 	dmb();
@@ -80,7 +114,6 @@ static void broadcast_tlb_a15_erratum(void)
 	if (!erratum_a15_798181())
 		return;

-	dummy_flush_tlb_a15_erratum();
 	smp_call_function(ipi_flush_tlb_a15_erratum, NULL, 1);
 }

@@ -92,7 +125,6 @@ static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
 	if (!erratum_a15_798181())
 		return;

-	dummy_flush_tlb_a15_erratum();
 	this_cpu = get_cpu();
 	a15_erratum_get_cpumask(this_cpu, mm, &mask);
 	smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 9c697db2787e..aea7ccb8d397 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -65,7 +65,7 @@ static bool vgic_present;
 static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 {
 	BUG_ON(preemptible());
-	__get_cpu_var(kvm_arm_running_vcpu) = vcpu;
+	__this_cpu_write(kvm_arm_running_vcpu, vcpu);
 }

 /**
@@ -75,7 +75,7 @@ static void kvm_arm_set_running_vcpu(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arm_get_running_vcpu(void)
 {
 	BUG_ON(preemptible());
-	return __get_cpu_var(kvm_arm_running_vcpu);
+	return __this_cpu_read(kvm_arm_running_vcpu);
 }

 /**
@@ -815,7 +815,7 @@ static void cpu_init_hyp_mode(void *dummy)

 	boot_pgd_ptr = kvm_mmu_get_boot_httbr();
 	pgd_ptr = kvm_mmu_get_httbr();
-	stack_page = __get_cpu_var(kvm_arm_hyp_stack_page);
+	stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
 	hyp_stack_ptr = stack_page + PAGE_SIZE;
 	vector_ptr = (unsigned long)__kvm_hyp_vector;

diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
index 025f742dd4df..3e58d710013c 100644
--- a/arch/arm/lib/uaccess_with_memcpy.c
+++ b/arch/arm/lib/uaccess_with_memcpy.c
@@ -18,6 +18,7 @@
 #include <linux/hardirq.h> /* for in_atomic() */
 #include <linux/gfp.h>
 #include <linux/highmem.h>
+#include <linux/hugetlb.h>
 #include <asm/current.h>
 #include <asm/page.h>

@@ -40,7 +41,35 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 		return 0;

 	pmd = pmd_offset(pud, addr);
-	if (unlikely(pmd_none(*pmd) || pmd_bad(*pmd)))
+	if (unlikely(pmd_none(*pmd)))
+		return 0;
+
+	/*
+	 * A pmd can be bad if it refers to a HugeTLB or THP page.
+	 *
+	 * Both THP and HugeTLB pages have the same pmd layout
+	 * and should not be manipulated by the pte functions.
+	 *
+	 * Lock the page table for the destination and check
+	 * to see that it's still huge and whether or not we will
+	 * need to fault on write, or if we have a splitting THP.
+	 */
+	if (unlikely(pmd_thp_or_huge(*pmd))) {
+		ptl = &current->mm->page_table_lock;
+		spin_lock(ptl);
+		if (unlikely(!pmd_thp_or_huge(*pmd)
+			|| pmd_hugewillfault(*pmd)
+			|| pmd_trans_splitting(*pmd))) {
+			spin_unlock(ptl);
+			return 0;
+		}
+
+		*ptep = NULL;
+		*ptlp = ptl;
+		return 1;
+	}
+
+	if (unlikely(pmd_bad(*pmd)))
 		return 0;

 	pte = pte_offset_map_lock(current->mm, pmd, addr, &ptl);
@@ -94,7 +123,10 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 		from += tocopy;
 		n -= tocopy;

-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	if (!atomic)
 		up_read(&current->mm->mmap_sem);
@@ -147,7 +179,10 @@ __clear_user_memset(void __user *addr, unsigned long n)
 		addr += tocopy;
 		n -= tocopy;

-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap_unlock(pte, ptl);
+		else
+			spin_unlock(ptl);
 	}
 	up_read(&current->mm->mmap_sem);

diff --git a/arch/arm/mach-footbridge/netwinder-hw.c b/arch/arm/mach-footbridge/netwinder-hw.c
index 1fd2cf097e30..eb1fa5c84723 100644
--- a/arch/arm/mach-footbridge/netwinder-hw.c
+++ b/arch/arm/mach-footbridge/netwinder-hw.c
@@ -692,14 +692,14 @@ static void netwinder_led_set(struct led_classdev *cdev,
 	unsigned long flags;
 	u32 reg;

-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
 	if (b != LED_OFF)
 		reg &= ~led->mask;
 	else
 		reg |= led->mask;
 	nw_gpio_modify_op(led->mask, reg);
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);
 }

 static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
@@ -709,9 +709,9 @@ static enum led_brightness netwinder_led_get(struct led_classdev *cdev)
 	unsigned long flags;
 	u32 reg;

-	spin_lock_irqsave(&nw_gpio_lock, flags);
+	raw_spin_lock_irqsave(&nw_gpio_lock, flags);
 	reg = nw_gpio_read();
-	spin_unlock_irqrestore(&nw_gpio_lock, flags);
+	raw_spin_unlock_irqrestore(&nw_gpio_lock, flags);

 	return (reg & led->mask) ? LED_OFF : LED_FULL;
 }
diff --git a/arch/arm/mach-vexpress/dcscb.c b/arch/arm/mach-vexpress/dcscb.c
index 3a6384c6c435..14d499688736 100644
--- a/arch/arm/mach-vexpress/dcscb.c
+++ b/arch/arm/mach-vexpress/dcscb.c
@@ -133,38 +133,8 @@ static void dcscb_power_down(void)
133 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) { 133 if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
134 arch_spin_unlock(&dcscb_lock); 134 arch_spin_unlock(&dcscb_lock);
135 135
136 /* 136 /* Flush all cache levels for this cluster. */
137 * Flush all cache levels for this cluster. 137 v7_exit_coherency_flush(all);
138 *
139 * To do so we do:
140 * - Clear the SCTLR.C bit to prevent further cache allocations
141 * - Flush the whole cache
142 * - Clear the ACTLR "SMP" bit to disable local coherency
143 *
144 * Let's do it in the safest possible way i.e. with
145 * no memory access within the following sequence
146 * including to the stack.
147 *
148 * Note: fp is preserved to the stack explicitly prior doing
149 * this since adding it to the clobber list is incompatible
150 * with having CONFIG_FRAME_POINTER=y.
151 */
152 asm volatile(
153 "str fp, [sp, #-4]! \n\t"
154 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
155 "bic r0, r0, #"__stringify(CR_C)" \n\t"
156 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
157 "isb \n\t"
158 "bl v7_flush_dcache_all \n\t"
159 "clrex \n\t"
160 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
161 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
162 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
163 "isb \n\t"
164 "dsb \n\t"
165 "ldr fp, [sp], #4"
166 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
167 "r9","r10","lr","memory");
168 138
169 /* 139 /*
170 * This is a harmless no-op. On platforms with a real 140 * This is a harmless no-op. On platforms with a real
@@ -183,26 +153,8 @@ static void dcscb_power_down(void)
183 } else { 153 } else {
184 arch_spin_unlock(&dcscb_lock); 154 arch_spin_unlock(&dcscb_lock);
185 155
186 /* 156 /* Disable and flush the local CPU cache. */
187 * Flush the local CPU cache. 157 v7_exit_coherency_flush(louis);
188 * Let's do it in the safest possible way as above.
189 */
190 asm volatile(
191 "str fp, [sp, #-4]! \n\t"
192 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
193 "bic r0, r0, #"__stringify(CR_C)" \n\t"
194 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
195 "isb \n\t"
196 "bl v7_flush_dcache_louis \n\t"
197 "clrex \n\t"
198 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
199 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
200 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
201 "isb \n\t"
202 "dsb \n\t"
203 "ldr fp, [sp], #4"
204 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
205 "r9","r10","lr","memory");
206 } 158 }
207 159
208 __mcpm_cpu_down(cpu, cluster); 160 __mcpm_cpu_down(cpu, cluster);
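
Both branches above now delegate the open-coded disable-and-flush sequence to the v7_exit_coherency_flush() helper: the last man flushes every cache level (all), while a lone departing CPU only flushes to the level of unification (louis). A sketch of the resulting shape, with my_pm_power_down as a hypothetical stand-in for a platform's MCPM power_down method:

    #include <asm/cacheflush.h>

    static void my_pm_power_down(bool last_man)
    {
            if (last_man) {
                    /* Cluster is going down: flush L1 and L2 entirely. */
                    v7_exit_coherency_flush(all);
            } else {
                    /* Only this CPU leaves coherency: louis suffices. */
                    v7_exit_coherency_flush(louis);
            }
    }
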
diff --git a/arch/arm/mach-vexpress/tc2_pm.c b/arch/arm/mach-vexpress/tc2_pm.c
index e6eb48192912..4eb92ebfd953 100644
--- a/arch/arm/mach-vexpress/tc2_pm.c
+++ b/arch/arm/mach-vexpress/tc2_pm.c
@@ -156,32 +156,7 @@ static void tc2_pm_down(u64 residency)
156 : : "r" (0x400) ); 156 : : "r" (0x400) );
157 } 157 }
158 158
159 /* 159 v7_exit_coherency_flush(all);
160 * We need to disable and flush the whole (L1 and L2) cache.
161 * Let's do it in the safest possible way i.e. with
162 * no memory access within the following sequence
163 * including the stack.
164 *
165 * Note: fp is preserved to the stack explicitly prior doing
166 * this since adding it to the clobber list is incompatible
167 * with having CONFIG_FRAME_POINTER=y.
168 */
169 asm volatile(
170 "str fp, [sp, #-4]! \n\t"
171 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
172 "bic r0, r0, #"__stringify(CR_C)" \n\t"
173 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
174 "isb \n\t"
175 "bl v7_flush_dcache_all \n\t"
176 "clrex \n\t"
177 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
178 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
179 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
180 "isb \n\t"
181 "dsb \n\t"
182 "ldr fp, [sp], #4"
183 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
184 "r9","r10","lr","memory");
185 160
186 cci_disable_port_by_cpu(mpidr); 161 cci_disable_port_by_cpu(mpidr);
187 162
@@ -197,26 +172,7 @@ static void tc2_pm_down(u64 residency)
197 172
198 arch_spin_unlock(&tc2_pm_lock); 173 arch_spin_unlock(&tc2_pm_lock);
199 174
200 /* 175 v7_exit_coherency_flush(louis);
201 * We need to disable and flush only the L1 cache.
202 * Let's do it in the safest possible way as above.
203 */
204 asm volatile(
205 "str fp, [sp, #-4]! \n\t"
206 "mrc p15, 0, r0, c1, c0, 0 @ get CR \n\t"
207 "bic r0, r0, #"__stringify(CR_C)" \n\t"
208 "mcr p15, 0, r0, c1, c0, 0 @ set CR \n\t"
209 "isb \n\t"
210 "bl v7_flush_dcache_louis \n\t"
211 "clrex \n\t"
212 "mrc p15, 0, r0, c1, c0, 1 @ get AUXCR \n\t"
213 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t"
214 "mcr p15, 0, r0, c1, c0, 1 @ set AUXCR \n\t"
215 "isb \n\t"
216 "dsb \n\t"
217 "ldr fp, [sp], #4"
218 : : : "r0","r1","r2","r3","r4","r5","r6","r7",
219 "r9","r10","lr","memory");
220 } 176 }
221 177
222 __mcpm_cpu_down(cpu, cluster); 178 __mcpm_cpu_down(cpu, cluster);
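
For reference, the consolidated helper lives in arch/arm/include/asm/cacheflush.h; the definition below is reconstructed from the sequences removed above and may differ in detail from the final version. The level argument is pasted into the v7_flush_dcache_<level> symbol name, which is why callers pass the bare all/louis tokens:

    /* Reconstructed sketch; needs <linux/stringify.h> and <asm/cp15.h>. */
    #define v7_exit_coherency_flush(level) \
            asm volatile( \
            "stmfd  sp!, {fp, ip} \n\t" \
            "mrc    p15, 0, r0, c1, c0, 0   @ get SCTLR \n\t" \
            "bic    r0, r0, #"__stringify(CR_C)" \n\t" \
            "mcr    p15, 0, r0, c1, c0, 0   @ set SCTLR \n\t" \
            "isb    \n\t" \
            "bl     v7_flush_dcache_"__stringify(level)" \n\t" \
            "clrex  \n\t" \
            "mrc    p15, 0, r0, c1, c0, 1   @ get ACTLR \n\t" \
            "bic    r0, r0, #(1 << 6)       @ disable local coherency \n\t" \
            "mcr    p15, 0, r0, c1, c0, 1   @ set ACTLR \n\t" \
            "isb    \n\t" \
            "dsb    \n\t" \
            "ldmfd  sp!, {fp, ip}" \
            : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
                  "r9","r10","lr","memory" )
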
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 0c6356255fe3..d27158c38eb0 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -202,13 +202,11 @@ int valid_phys_addr_range(phys_addr_t addr, size_t size)
202} 202}
203 203
204/* 204/*
205 * We don't use supersection mappings for mmap() on /dev/mem, which 205 * Do not allow /dev/mem mappings beyond the supported physical range.
206 * means that we can't map the memory area above the 4G barrier into
207 * userspace.
208 */ 206 */
209int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) 207int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
210{ 208{
211 return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); 209 return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
212} 210}
213 211
214#ifdef CONFIG_STRICT_DEVMEM 212#ifdef CONFIG_STRICT_DEVMEM
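
The new bound derives the highest mappable pfn from PHYS_MASK instead of the hard-coded 0x00100000 (the 4 GiB limit of 32-bit physical addressing). A worked example, assuming LPAE with 40-bit physical addresses and 4 KiB pages:

    /*
     * PHYS_MASK              = (1ULL << 40) - 1 = 0xFFFFFFFFFF
     * PHYS_MASK >> 12        = 0x0FFFFFFF   (highest valid pfn)
     * 1 + (PHYS_MASK >> 12)  = 0x10000000   (exclusive pfn limit)
     *
     * A request with pfn = 0x0FFFFF00 and size = 1 MiB (0x100 pages)
     * ends exactly at pfn 0x10000000 and is still accepted; one more
     * page would be rejected.
     */
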
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 836364468571..01de5aaa3edc 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -126,20 +126,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
126 return oldval; 126 return oldval;
127} 127}
128 128
129static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
130{
131 unsigned long tmp, tmp2;
132
133 asm volatile("// atomic_clear_mask\n"
134"1: ldxr %0, %2\n"
135" bic %0, %0, %3\n"
136" stxr %w1, %0, %2\n"
137" cbnz %w1, 1b"
138 : "=&r" (tmp), "=&r" (tmp2), "+Q" (*addr)
139 : "Ir" (mask)
140 : "cc");
141}
142
143#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 129#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
144 130
145static inline int __atomic_add_unless(atomic_t *v, int a, int u) 131static inline int __atomic_add_unless(atomic_t *v, int a, int u)
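
atomic_clear_mask() had no arm64 users, so its hand-written ldxr/stxr loop goes away. Should a caller ever need the operation, it can be expressed portably on the retained cmpxchg primitive; a minimal sketch, not part of this patch:

    /* Clear 'mask' bits in *v via a compare-and-swap retry loop. */
    static inline void atomic_clear_mask_cmpxchg(atomic_t *v, int mask)
    {
            int old, new;

            do {
                    old = atomic_read(v);
                    new = old & ~mask;
            } while (atomic_cmpxchg(v, old, new) != old);
    }
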
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index cbfacf7fb438..6a0a9b132d7a 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -27,7 +27,6 @@
27#include <linux/uaccess.h> 27#include <linux/uaccess.h>
28 28
29#include <asm/debug-monitors.h> 29#include <asm/debug-monitors.h>
30#include <asm/local.h>
31#include <asm/cputype.h> 30#include <asm/cputype.h>
32#include <asm/system_misc.h> 31#include <asm/system_misc.h>
33 32
@@ -89,8 +88,8 @@ early_param("nodebugmon", early_debug_disable);
89 * Keep track of debug users on each core. 88 * Keep track of debug users on each core.
90 * The ref counts are per-cpu so we use a local_t type. 89 * The ref counts are per-cpu, updated with this_cpu ops.
91 */ 90 */
92static DEFINE_PER_CPU(local_t, mde_ref_count); 91static DEFINE_PER_CPU(int, mde_ref_count);
93static DEFINE_PER_CPU(local_t, kde_ref_count); 92static DEFINE_PER_CPU(int, kde_ref_count);
94 93
95void enable_debug_monitors(enum debug_el el) 94void enable_debug_monitors(enum debug_el el)
96{ 95{
@@ -98,11 +97,11 @@ void enable_debug_monitors(enum debug_el el)
98 97
99 WARN_ON(preemptible()); 98 WARN_ON(preemptible());
100 99
101 if (local_inc_return(&__get_cpu_var(mde_ref_count)) == 1) 100 if (this_cpu_inc_return(mde_ref_count) == 1)
102 enable = DBG_MDSCR_MDE; 101 enable = DBG_MDSCR_MDE;
103 102
104 if (el == DBG_ACTIVE_EL1 && 103 if (el == DBG_ACTIVE_EL1 &&
105 local_inc_return(&__get_cpu_var(kde_ref_count)) == 1) 104 this_cpu_inc_return(kde_ref_count) == 1)
106 enable |= DBG_MDSCR_KDE; 105 enable |= DBG_MDSCR_KDE;
107 106
108 if (enable && debug_enabled) { 107 if (enable && debug_enabled) {
@@ -118,11 +117,11 @@ void disable_debug_monitors(enum debug_el el)
118 117
119 WARN_ON(preemptible()); 118 WARN_ON(preemptible());
120 119
121 if (local_dec_and_test(&__get_cpu_var(mde_ref_count))) 120 if (this_cpu_dec_return(mde_ref_count) == 0)
122 disable = ~DBG_MDSCR_MDE; 121 disable = ~DBG_MDSCR_MDE;
123 122
124 if (el == DBG_ACTIVE_EL1 && 123 if (el == DBG_ACTIVE_EL1 &&
125 local_dec_and_test(&__get_cpu_var(kde_ref_count))) 124 this_cpu_dec_return(kde_ref_count) == 0)
126 disable &= ~DBG_MDSCR_KDE; 125 disable &= ~DBG_MDSCR_KDE;
127 126
128 if (disable) { 127 if (disable) {
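
The ref counts never leave their own CPU (note the WARN_ON(preemptible()) in both paths), so plain ints updated with this_cpu ops replace the heavier local_t. The pattern in isolation, with enable_hw()/disable_hw() as hypothetical callbacks:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, feature_refs);

    static void enable_hw(void)  { /* hypothetical: set HW enable bit */ }
    static void disable_hw(void) { /* hypothetical: clear HW enable bit */ }

    static void get_feature(void)
    {
            WARN_ON(preemptible());
            if (this_cpu_inc_return(feature_refs) == 1)
                    enable_hw();            /* first user on this CPU */
    }

    static void put_feature(void)
    {
            WARN_ON(preemptible());
            if (this_cpu_dec_return(feature_refs) == 0)
                    disable_hw();           /* last user on this CPU */
    }
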
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 329218ca9ffb..ff516f6691e4 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -184,14 +184,14 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
184 /* Breakpoint */ 184 /* Breakpoint */
185 ctrl_reg = AARCH64_DBG_REG_BCR; 185 ctrl_reg = AARCH64_DBG_REG_BCR;
186 val_reg = AARCH64_DBG_REG_BVR; 186 val_reg = AARCH64_DBG_REG_BVR;
187 slots = __get_cpu_var(bp_on_reg); 187 slots = this_cpu_ptr(bp_on_reg);
188 max_slots = core_num_brps; 188 max_slots = core_num_brps;
189 reg_enable = !debug_info->bps_disabled; 189 reg_enable = !debug_info->bps_disabled;
190 } else { 190 } else {
191 /* Watchpoint */ 191 /* Watchpoint */
192 ctrl_reg = AARCH64_DBG_REG_WCR; 192 ctrl_reg = AARCH64_DBG_REG_WCR;
193 val_reg = AARCH64_DBG_REG_WVR; 193 val_reg = AARCH64_DBG_REG_WVR;
194 slots = __get_cpu_var(wp_on_reg); 194 slots = this_cpu_ptr(wp_on_reg);
195 max_slots = core_num_wrps; 195 max_slots = core_num_wrps;
196 reg_enable = !debug_info->wps_disabled; 196 reg_enable = !debug_info->wps_disabled;
197 } 197 }
@@ -230,12 +230,12 @@ void arch_uninstall_hw_breakpoint(struct perf_event *bp)
230 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) { 230 if (info->ctrl.type == ARM_BREAKPOINT_EXECUTE) {
231 /* Breakpoint */ 231 /* Breakpoint */
232 base = AARCH64_DBG_REG_BCR; 232 base = AARCH64_DBG_REG_BCR;
233 slots = __get_cpu_var(bp_on_reg); 233 slots = this_cpu_ptr(bp_on_reg);
234 max_slots = core_num_brps; 234 max_slots = core_num_brps;
235 } else { 235 } else {
236 /* Watchpoint */ 236 /* Watchpoint */
237 base = AARCH64_DBG_REG_WCR; 237 base = AARCH64_DBG_REG_WCR;
238 slots = __get_cpu_var(wp_on_reg); 238 slots = this_cpu_ptr(wp_on_reg);
239 max_slots = core_num_wrps; 239 max_slots = core_num_wrps;
240 } 240 }
241 241
@@ -505,11 +505,11 @@ static void toggle_bp_registers(int reg, enum debug_el el, int enable)
505 505
506 switch (reg) { 506 switch (reg) {
507 case AARCH64_DBG_REG_BCR: 507 case AARCH64_DBG_REG_BCR:
508 slots = __get_cpu_var(bp_on_reg); 508 slots = this_cpu_ptr(bp_on_reg);
509 max_slots = core_num_brps; 509 max_slots = core_num_brps;
510 break; 510 break;
511 case AARCH64_DBG_REG_WCR: 511 case AARCH64_DBG_REG_WCR:
512 slots = __get_cpu_var(wp_on_reg); 512 slots = this_cpu_ptr(wp_on_reg);
513 max_slots = core_num_wrps; 513 max_slots = core_num_wrps;
514 break; 514 break;
515 default: 515 default:
@@ -546,7 +546,7 @@ static int breakpoint_handler(unsigned long unused, unsigned int esr,
546 struct debug_info *debug_info; 546 struct debug_info *debug_info;
547 struct arch_hw_breakpoint_ctrl ctrl; 547 struct arch_hw_breakpoint_ctrl ctrl;
548 548
549 slots = (struct perf_event **)__get_cpu_var(bp_on_reg); 549 slots = this_cpu_ptr(bp_on_reg);
550 addr = instruction_pointer(regs); 550 addr = instruction_pointer(regs);
551 debug_info = &current->thread.debug; 551 debug_info = &current->thread.debug;
552 552
@@ -596,7 +596,7 @@ unlock:
596 user_enable_single_step(current); 596 user_enable_single_step(current);
597 } else { 597 } else {
598 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0); 598 toggle_bp_registers(AARCH64_DBG_REG_BCR, DBG_ACTIVE_EL1, 0);
599 kernel_step = &__get_cpu_var(stepping_kernel_bp); 599 kernel_step = this_cpu_ptr(&stepping_kernel_bp);
600 600
601 if (*kernel_step != ARM_KERNEL_STEP_NONE) 601 if (*kernel_step != ARM_KERNEL_STEP_NONE)
602 return 0; 602 return 0;
@@ -623,7 +623,7 @@ static int watchpoint_handler(unsigned long addr, unsigned int esr,
623 struct arch_hw_breakpoint *info; 623 struct arch_hw_breakpoint *info;
624 struct arch_hw_breakpoint_ctrl ctrl; 624 struct arch_hw_breakpoint_ctrl ctrl;
625 625
626 slots = (struct perf_event **)__get_cpu_var(wp_on_reg); 626 slots = this_cpu_ptr(wp_on_reg);
627 debug_info = &current->thread.debug; 627 debug_info = &current->thread.debug;
628 628
629 for (i = 0; i < core_num_wrps; ++i) { 629 for (i = 0; i < core_num_wrps; ++i) {
@@ -698,7 +698,7 @@ unlock:
698 user_enable_single_step(current); 698 user_enable_single_step(current);
699 } else { 699 } else {
700 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0); 700 toggle_bp_registers(AARCH64_DBG_REG_WCR, DBG_ACTIVE_EL1, 0);
701 kernel_step = &__get_cpu_var(stepping_kernel_bp); 701 kernel_step = this_cpu_ptr(&stepping_kernel_bp);
702 702
703 if (*kernel_step != ARM_KERNEL_STEP_NONE) 703 if (*kernel_step != ARM_KERNEL_STEP_NONE)
704 return 0; 704 return 0;
@@ -722,7 +722,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
722 struct debug_info *debug_info = &current->thread.debug; 722 struct debug_info *debug_info = &current->thread.debug;
723 int handled_exception = 0, *kernel_step; 723 int handled_exception = 0, *kernel_step;
724 724
725 kernel_step = &__get_cpu_var(stepping_kernel_bp); 725 kernel_step = this_cpu_ptr(&stepping_kernel_bp);
726 726
727 /* 727 /*
728 * Called from single-step exception handler. 728 * Called from single-step exception handler.
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index cea1594ff933..6983ed5a351a 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1044,7 +1044,7 @@ static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
1044 */ 1044 */
1045 regs = get_irq_regs(); 1045 regs = get_irq_regs();
1046 1046
1047 cpuc = &__get_cpu_var(cpu_hw_events); 1047 cpuc = this_cpu_ptr(&cpu_hw_events);
1048 for (idx = 0; idx < cpu_pmu->num_events; ++idx) { 1048 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
1049 struct perf_event *event = cpuc->events[idx]; 1049 struct perf_event *event = cpuc->events[idx];
1050 struct hw_perf_event *hwc; 1050 struct hw_perf_event *hwc;
@@ -1257,7 +1257,7 @@ device_initcall(register_pmu_driver);
1257 1257
1258static struct pmu_hw_events *armpmu_get_cpu_events(void) 1258static struct pmu_hw_events *armpmu_get_cpu_events(void)
1259{ 1259{
1260 return &__get_cpu_var(cpu_hw_events); 1260 return this_cpu_ptr(&cpu_hw_events);
1261} 1261}
1262 1262
1263static void __init cpu_pmu_init(struct arm_pmu *armpmu) 1263static void __init cpu_pmu_init(struct arm_pmu *armpmu)
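
Both files above replace the deprecated __get_cpu_var() with this_cpu_ptr(), which also drops the casts. Rule of thumb: a per-cpu array name is passed directly, while a per-cpu scalar or struct needs an explicit &. A sketch with hypothetical variables:

    #include <linux/percpu.h>
    #include <linux/perf_event.h>

    static DEFINE_PER_CPU(struct perf_event *, slot_regs[16]);
    static DEFINE_PER_CPU(int, step_state);

    static void example(void)
    {
            struct perf_event **slots = this_cpu_ptr(slot_regs);    /* array */
            int *step = this_cpu_ptr(&step_state);                  /* scalar */

            /* ... use slots[] and *step on the local CPU only ... */
    }
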
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 43ec7e247a80..b327a1b1b7e8 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -21,7 +21,7 @@
21#include <linux/resource.h> 21#include <linux/resource.h>
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23 23
24#define AMBA_NR_IRQS 2 24#define AMBA_NR_IRQS 9
25#define AMBA_CID 0xb105f00d 25#define AMBA_CID 0xb105f00d
26 26
27struct clk; 27struct clk;
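
Raising AMBA_NR_IRQS from 2 to 9 enlarges the fixed irq[] table in struct amba_device, so primecell peripherals with more interrupt outputs can be described. A hypothetical probe sketch iterating the table (my_probe is illustrative only):

    #include <linux/amba/bus.h>
    #include <linux/device.h>

    static int my_probe(struct amba_device *adev, const struct amba_id *id)
    {
            unsigned int i;

            /* irq[] holds up to AMBA_NR_IRQS entries; 0 means "no IRQ". */
            for (i = 0; i < AMBA_NR_IRQS && adev->irq[i]; i++)
                    dev_info(&adev->dev, "irq[%u] = %u\n", i, adev->irq[i]);

            return 0;
    }
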