path: root/arch/arm/include
Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/Kbuild               |   1
-rw-r--r--  arch/arm/include/asm/assembler.h          |   7
-rw-r--r--  arch/arm/include/asm/atomic.h             | 108
-rw-r--r--  arch/arm/include/asm/bL_switcher.h        |  77
-rw-r--r--  arch/arm/include/asm/bug.h                |  10
-rw-r--r--  arch/arm/include/asm/cacheflush.h         |  46
-rw-r--r--  arch/arm/include/asm/cmpxchg.h            |  58
-rw-r--r--  arch/arm/include/asm/cputype.h            |   1
-rw-r--r--  arch/arm/include/asm/hardirq.h            |   2
-rw-r--r--  arch/arm/include/asm/hardware/coresight.h |   8
-rw-r--r--  arch/arm/include/asm/kgdb.h               |   3
-rw-r--r--  arch/arm/include/asm/mach/arch.h          |   1
-rw-r--r--  arch/arm/include/asm/mcpm.h               |  39
-rw-r--r--  arch/arm/include/asm/memory.h             |  76
-rw-r--r--  arch/arm/include/asm/mmu.h                |   2
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h     |   7
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h     |   3
-rw-r--r--  arch/arm/include/asm/processor.h          |  33
-rw-r--r--  arch/arm/include/asm/setup.h              |   2
-rw-r--r--  arch/arm/include/asm/smp.h                |   2
-rw-r--r--  arch/arm/include/asm/spinlock.h           |  36
-rw-r--r--  arch/arm/include/asm/spinlock_types.h     |   2
-rw-r--r--  arch/arm/include/asm/tlbflush.h           |  48
-rw-r--r--  arch/arm/include/asm/unified.h            |   4
-rw-r--r--  arch/arm/include/debug/efm32.S            |  45
-rw-r--r--  arch/arm/include/debug/msm.S              |   5
-rw-r--r--  arch/arm/include/debug/pl01x.S            |   2
-rw-r--r--  arch/arm/include/uapi/asm/Kbuild          |   1
-rw-r--r--  arch/arm/include/uapi/asm/perf_regs.h     |  23
29 files changed, 501 insertions(+), 151 deletions(-)
diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild
index 1a7024b41351..c38b58c80202 100644
--- a/arch/arm/include/asm/Kbuild
+++ b/arch/arm/include/asm/Kbuild
@@ -24,6 +24,7 @@ generic-y += sembuf.h
24generic-y += serial.h 24generic-y += serial.h
25generic-y += shmbuf.h 25generic-y += shmbuf.h
26generic-y += siginfo.h 26generic-y += siginfo.h
27generic-y += simd.h
27generic-y += sizes.h 28generic-y += sizes.h
28generic-y += socket.h 29generic-y += socket.h
29generic-y += sockios.h 30generic-y += sockios.h
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index fcc1b5bf6979..5c2285160575 100644
--- a/arch/arm/include/asm/assembler.h
+++ b/arch/arm/include/asm/assembler.h
@@ -53,6 +53,13 @@
53#define put_byte_3 lsl #0 53#define put_byte_3 lsl #0
54#endif 54#endif
55 55
56/* Select code for any configuration running in BE8 mode */
57#ifdef CONFIG_CPU_ENDIAN_BE8
58#define ARM_BE8(code...) code
59#else
60#define ARM_BE8(code...)
61#endif
62
56/* 63/*
57 * Data preload for architectures that support it 64 * Data preload for architectures that support it
58 */ 65 */
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..62d2cb53b069 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -12,6 +12,7 @@
12#define __ASM_ARM_ATOMIC_H 12#define __ASM_ARM_ATOMIC_H
13 13
14#include <linux/compiler.h> 14#include <linux/compiler.h>
15#include <linux/prefetch.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/irqflags.h> 17#include <linux/irqflags.h>
17#include <asm/barrier.h> 18#include <asm/barrier.h>
@@ -41,6 +42,7 @@ static inline void atomic_add(int i, atomic_t *v)
41 unsigned long tmp; 42 unsigned long tmp;
42 int result; 43 int result;
43 44
45 prefetchw(&v->counter);
44 __asm__ __volatile__("@ atomic_add\n" 46 __asm__ __volatile__("@ atomic_add\n"
45"1: ldrex %0, [%3]\n" 47"1: ldrex %0, [%3]\n"
46" add %0, %0, %4\n" 48" add %0, %0, %4\n"
@@ -79,6 +81,7 @@ static inline void atomic_sub(int i, atomic_t *v)
79 unsigned long tmp; 81 unsigned long tmp;
80 int result; 82 int result;
81 83
84 prefetchw(&v->counter);
82 __asm__ __volatile__("@ atomic_sub\n" 85 __asm__ __volatile__("@ atomic_sub\n"
83"1: ldrex %0, [%3]\n" 86"1: ldrex %0, [%3]\n"
84" sub %0, %0, %4\n" 87" sub %0, %0, %4\n"
@@ -114,7 +117,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
114 117
115static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new) 118static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
116{ 119{
117 unsigned long oldval, res; 120 int oldval;
121 unsigned long res;
118 122
119 smp_mb(); 123 smp_mb();
120 124
@@ -134,21 +138,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
134 return oldval; 138 return oldval;
135} 139}
136 140
137static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
138{
139 unsigned long tmp, tmp2;
140
141 __asm__ __volatile__("@ atomic_clear_mask\n"
142"1: ldrex %0, [%3]\n"
143" bic %0, %0, %4\n"
144" strex %1, %0, [%3]\n"
145" teq %1, #0\n"
146" bne 1b"
147 : "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
148 : "r" (addr), "Ir" (mask)
149 : "cc");
150}
151
152#else /* ARM_ARCH_6 */ 141#else /* ARM_ARCH_6 */
153 142
154#ifdef CONFIG_SMP 143#ifdef CONFIG_SMP
@@ -197,15 +186,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
197 return ret; 186 return ret;
198} 187}
199 188
200static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
201{
202 unsigned long flags;
203
204 raw_local_irq_save(flags);
205 *addr &= ~mask;
206 raw_local_irq_restore(flags);
207}
208
209#endif /* __LINUX_ARM_ARCH__ */ 189#endif /* __LINUX_ARM_ARCH__ */
210 190
211#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 191#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +218,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
238 218
239#ifndef CONFIG_GENERIC_ATOMIC64 219#ifndef CONFIG_GENERIC_ATOMIC64
240typedef struct { 220typedef struct {
241 u64 __aligned(8) counter; 221 long long counter;
242} atomic64_t; 222} atomic64_t;
243 223
244#define ATOMIC64_INIT(i) { (i) } 224#define ATOMIC64_INIT(i) { (i) }
245 225
246#ifdef CONFIG_ARM_LPAE 226#ifdef CONFIG_ARM_LPAE
247static inline u64 atomic64_read(const atomic64_t *v) 227static inline long long atomic64_read(const atomic64_t *v)
248{ 228{
249 u64 result; 229 long long result;
250 230
251 __asm__ __volatile__("@ atomic64_read\n" 231 __asm__ __volatile__("@ atomic64_read\n"
252" ldrd %0, %H0, [%1]" 232" ldrd %0, %H0, [%1]"
@@ -257,7 +237,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
257 return result; 237 return result;
258} 238}
259 239
260static inline void atomic64_set(atomic64_t *v, u64 i) 240static inline void atomic64_set(atomic64_t *v, long long i)
261{ 241{
262 __asm__ __volatile__("@ atomic64_set\n" 242 __asm__ __volatile__("@ atomic64_set\n"
263" strd %2, %H2, [%1]" 243" strd %2, %H2, [%1]"
@@ -266,9 +246,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
266 ); 246 );
267} 247}
268#else 248#else
269static inline u64 atomic64_read(const atomic64_t *v) 249static inline long long atomic64_read(const atomic64_t *v)
270{ 250{
271 u64 result; 251 long long result;
272 252
273 __asm__ __volatile__("@ atomic64_read\n" 253 __asm__ __volatile__("@ atomic64_read\n"
274" ldrexd %0, %H0, [%1]" 254" ldrexd %0, %H0, [%1]"
@@ -279,10 +259,11 @@ static inline u64 atomic64_read(const atomic64_t *v)
279 return result; 259 return result;
280} 260}
281 261
282static inline void atomic64_set(atomic64_t *v, u64 i) 262static inline void atomic64_set(atomic64_t *v, long long i)
283{ 263{
284 u64 tmp; 264 long long tmp;
285 265
266 prefetchw(&v->counter);
286 __asm__ __volatile__("@ atomic64_set\n" 267 __asm__ __volatile__("@ atomic64_set\n"
287"1: ldrexd %0, %H0, [%2]\n" 268"1: ldrexd %0, %H0, [%2]\n"
288" strexd %0, %3, %H3, [%2]\n" 269" strexd %0, %3, %H3, [%2]\n"
@@ -294,15 +275,16 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
294} 275}
295#endif 276#endif
296 277
297static inline void atomic64_add(u64 i, atomic64_t *v) 278static inline void atomic64_add(long long i, atomic64_t *v)
298{ 279{
299 u64 result; 280 long long result;
300 unsigned long tmp; 281 unsigned long tmp;
301 282
283 prefetchw(&v->counter);
302 __asm__ __volatile__("@ atomic64_add\n" 284 __asm__ __volatile__("@ atomic64_add\n"
303"1: ldrexd %0, %H0, [%3]\n" 285"1: ldrexd %0, %H0, [%3]\n"
304" adds %0, %0, %4\n" 286" adds %Q0, %Q0, %Q4\n"
305" adc %H0, %H0, %H4\n" 287" adc %R0, %R0, %R4\n"
306" strexd %1, %0, %H0, [%3]\n" 288" strexd %1, %0, %H0, [%3]\n"
307" teq %1, #0\n" 289" teq %1, #0\n"
308" bne 1b" 290" bne 1b"
@@ -311,17 +293,17 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
311 : "cc"); 293 : "cc");
312} 294}
313 295
314static inline u64 atomic64_add_return(u64 i, atomic64_t *v) 296static inline long long atomic64_add_return(long long i, atomic64_t *v)
315{ 297{
316 u64 result; 298 long long result;
317 unsigned long tmp; 299 unsigned long tmp;
318 300
319 smp_mb(); 301 smp_mb();
320 302
321 __asm__ __volatile__("@ atomic64_add_return\n" 303 __asm__ __volatile__("@ atomic64_add_return\n"
322"1: ldrexd %0, %H0, [%3]\n" 304"1: ldrexd %0, %H0, [%3]\n"
323" adds %0, %0, %4\n" 305" adds %Q0, %Q0, %Q4\n"
324" adc %H0, %H0, %H4\n" 306" adc %R0, %R0, %R4\n"
325" strexd %1, %0, %H0, [%3]\n" 307" strexd %1, %0, %H0, [%3]\n"
326" teq %1, #0\n" 308" teq %1, #0\n"
327" bne 1b" 309" bne 1b"
@@ -334,15 +316,16 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
334 return result; 316 return result;
335} 317}
336 318
337static inline void atomic64_sub(u64 i, atomic64_t *v) 319static inline void atomic64_sub(long long i, atomic64_t *v)
338{ 320{
339 u64 result; 321 long long result;
340 unsigned long tmp; 322 unsigned long tmp;
341 323
324 prefetchw(&v->counter);
342 __asm__ __volatile__("@ atomic64_sub\n" 325 __asm__ __volatile__("@ atomic64_sub\n"
343"1: ldrexd %0, %H0, [%3]\n" 326"1: ldrexd %0, %H0, [%3]\n"
344" subs %0, %0, %4\n" 327" subs %Q0, %Q0, %Q4\n"
345" sbc %H0, %H0, %H4\n" 328" sbc %R0, %R0, %R4\n"
346" strexd %1, %0, %H0, [%3]\n" 329" strexd %1, %0, %H0, [%3]\n"
347" teq %1, #0\n" 330" teq %1, #0\n"
348" bne 1b" 331" bne 1b"
@@ -351,17 +334,17 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
351 : "cc"); 334 : "cc");
352} 335}
353 336
354static inline u64 atomic64_sub_return(u64 i, atomic64_t *v) 337static inline long long atomic64_sub_return(long long i, atomic64_t *v)
355{ 338{
356 u64 result; 339 long long result;
357 unsigned long tmp; 340 unsigned long tmp;
358 341
359 smp_mb(); 342 smp_mb();
360 343
361 __asm__ __volatile__("@ atomic64_sub_return\n" 344 __asm__ __volatile__("@ atomic64_sub_return\n"
362"1: ldrexd %0, %H0, [%3]\n" 345"1: ldrexd %0, %H0, [%3]\n"
363" subs %0, %0, %4\n" 346" subs %Q0, %Q0, %Q4\n"
364" sbc %H0, %H0, %H4\n" 347" sbc %R0, %R0, %R4\n"
365" strexd %1, %0, %H0, [%3]\n" 348" strexd %1, %0, %H0, [%3]\n"
366" teq %1, #0\n" 349" teq %1, #0\n"
367" bne 1b" 350" bne 1b"
@@ -374,9 +357,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
374 return result; 357 return result;
375} 358}
376 359
377static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new) 360static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
361 long long new)
378{ 362{
379 u64 oldval; 363 long long oldval;
380 unsigned long res; 364 unsigned long res;
381 365
382 smp_mb(); 366 smp_mb();
@@ -398,9 +382,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
398 return oldval; 382 return oldval;
399} 383}
400 384
401static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new) 385static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
402{ 386{
403 u64 result; 387 long long result;
404 unsigned long tmp; 388 unsigned long tmp;
405 389
406 smp_mb(); 390 smp_mb();
@@ -419,18 +403,18 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
419 return result; 403 return result;
420} 404}
421 405
422static inline u64 atomic64_dec_if_positive(atomic64_t *v) 406static inline long long atomic64_dec_if_positive(atomic64_t *v)
423{ 407{
424 u64 result; 408 long long result;
425 unsigned long tmp; 409 unsigned long tmp;
426 410
427 smp_mb(); 411 smp_mb();
428 412
429 __asm__ __volatile__("@ atomic64_dec_if_positive\n" 413 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
430"1: ldrexd %0, %H0, [%3]\n" 414"1: ldrexd %0, %H0, [%3]\n"
431" subs %0, %0, #1\n" 415" subs %Q0, %Q0, #1\n"
432" sbc %H0, %H0, #0\n" 416" sbc %R0, %R0, #0\n"
433" teq %H0, #0\n" 417" teq %R0, #0\n"
434" bmi 2f\n" 418" bmi 2f\n"
435" strexd %1, %0, %H0, [%3]\n" 419" strexd %1, %0, %H0, [%3]\n"
436" teq %1, #0\n" 420" teq %1, #0\n"
@@ -445,9 +429,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
445 return result; 429 return result;
446} 430}
447 431
448static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u) 432static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
449{ 433{
450 u64 val; 434 long long val;
451 unsigned long tmp; 435 unsigned long tmp;
452 int ret = 1; 436 int ret = 1;
453 437
@@ -459,8 +443,8 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
459" teqeq %H0, %H5\n" 443" teqeq %H0, %H5\n"
460" moveq %1, #0\n" 444" moveq %1, #0\n"
461" beq 2f\n" 445" beq 2f\n"
462" adds %0, %0, %6\n" 446" adds %Q0, %Q0, %Q6\n"
463" adc %H0, %H0, %H6\n" 447" adc %R0, %R0, %R6\n"
464" strexd %2, %0, %H0, [%4]\n" 448" strexd %2, %0, %H0, [%4]\n"
465" teq %2, #0\n" 449" teq %2, #0\n"
466" bne 1b\n" 450" bne 1b\n"
diff --git a/arch/arm/include/asm/bL_switcher.h b/arch/arm/include/asm/bL_switcher.h
new file mode 100644
index 000000000000..1714800fa113
--- /dev/null
+++ b/arch/arm/include/asm/bL_switcher.h
@@ -0,0 +1,77 @@
1/*
2 * arch/arm/include/asm/bL_switcher.h
3 *
4 * Created by: Nicolas Pitre, April 2012
5 * Copyright: (C) 2012-2013 Linaro Limited
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef ASM_BL_SWITCHER_H
13#define ASM_BL_SWITCHER_H
14
15#include <linux/compiler.h>
16#include <linux/types.h>
17
18typedef void (*bL_switch_completion_handler)(void *cookie);
19
20int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
21 bL_switch_completion_handler completer,
22 void *completer_cookie);
23static inline int bL_switch_request(unsigned int cpu, unsigned int new_cluster_id)
24{
25 return bL_switch_request_cb(cpu, new_cluster_id, NULL, NULL);
26}
27
28/*
29 * Register here to be notified about runtime enabling/disabling of
30 * the switcher.
31 *
32 * The notifier chain is called with the switcher activation lock held:
33 * the switcher will not be enabled or disabled during callbacks.
34 * Callbacks must not call bL_switcher_{get,put}_enabled().
35 */
36#define BL_NOTIFY_PRE_ENABLE 0
37#define BL_NOTIFY_POST_ENABLE 1
38#define BL_NOTIFY_PRE_DISABLE 2
39#define BL_NOTIFY_POST_DISABLE 3
40
41#ifdef CONFIG_BL_SWITCHER
42
43int bL_switcher_register_notifier(struct notifier_block *nb);
44int bL_switcher_unregister_notifier(struct notifier_block *nb);
45
46/*
47 * Use these functions to temporarily prevent enabling/disabling of
48 * the switcher.
49 * bL_switcher_get_enabled() returns true if the switcher is currently
50 * enabled. Each call to bL_switcher_get_enabled() must be followed
51 * by a call to bL_switcher_put_enabled(). These functions are not
52 * recursive.
53 */
54bool bL_switcher_get_enabled(void);
55void bL_switcher_put_enabled(void);
56
57int bL_switcher_trace_trigger(void);
58int bL_switcher_get_logical_index(u32 mpidr);
59
60#else
61static inline int bL_switcher_register_notifier(struct notifier_block *nb)
62{
63 return 0;
64}
65
66static inline int bL_switcher_unregister_notifier(struct notifier_block *nb)
67{
68 return 0;
69}
70
71static inline bool bL_switcher_get_enabled(void) { return false; }
72static inline void bL_switcher_put_enabled(void) { }
73static inline int bL_switcher_trace_trigger(void) { return 0; }
74static inline int bL_switcher_get_logical_index(u32 mpidr) { return -EUNATCH; }
75#endif /* CONFIG_BL_SWITCHER */
76
77#endif
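The new header documents a notifier chain around runtime enabling and disabling of the switcher. A hedged sketch of how a client might hook into it; the my_* names are illustrative, and only the BL_NOTIFY_* events and bL_switcher_register_notifier() come from the header itself:

	#include <linux/init.h>
	#include <linux/notifier.h>
	#include <asm/bL_switcher.h>

	static int my_bl_notify(struct notifier_block *nb, unsigned long event, void *p)
	{
		switch (event) {
		case BL_NOTIFY_PRE_ENABLE:
		case BL_NOTIFY_PRE_DISABLE:
			/*
			 * Quiesce per-CPU state here.  Per the header comment, the
			 * activation lock is held, so bL_switcher_{get,put}_enabled()
			 * must not be called from this context.
			 */
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block my_bl_nb = { .notifier_call = my_bl_notify };

	static int __init my_bl_client_init(void)
	{
		return bL_switcher_register_notifier(&my_bl_nb);
	}
	device_initcall(my_bl_client_init);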
diff --git a/arch/arm/include/asm/bug.h b/arch/arm/include/asm/bug.h
index 7af5c6c3653a..b274bde24905 100644
--- a/arch/arm/include/asm/bug.h
+++ b/arch/arm/include/asm/bug.h
@@ -2,6 +2,8 @@
2#define _ASMARM_BUG_H 2#define _ASMARM_BUG_H
3 3
4#include <linux/linkage.h> 4#include <linux/linkage.h>
5#include <linux/types.h>
6#include <asm/opcodes.h>
5 7
6#ifdef CONFIG_BUG 8#ifdef CONFIG_BUG
7 9
@@ -12,10 +14,10 @@
12 */ 14 */
13#ifdef CONFIG_THUMB2_KERNEL 15#ifdef CONFIG_THUMB2_KERNEL
14#define BUG_INSTR_VALUE 0xde02 16#define BUG_INSTR_VALUE 0xde02
15#define BUG_INSTR_TYPE ".hword " 17#define BUG_INSTR(__value) __inst_thumb16(__value)
16#else 18#else
17#define BUG_INSTR_VALUE 0xe7f001f2 19#define BUG_INSTR_VALUE 0xe7f001f2
18#define BUG_INSTR_TYPE ".word " 20#define BUG_INSTR(__value) __inst_arm(__value)
19#endif 21#endif
20 22
21 23
@@ -33,7 +35,7 @@
33 35
34#define __BUG(__file, __line, __value) \ 36#define __BUG(__file, __line, __value) \
35do { \ 37do { \
36 asm volatile("1:\t" BUG_INSTR_TYPE #__value "\n" \ 38 asm volatile("1:\t" BUG_INSTR(__value) "\n" \
37 ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \ 39 ".pushsection .rodata.str, \"aMS\", %progbits, 1\n" \
38 "2:\t.asciz " #__file "\n" \ 40 "2:\t.asciz " #__file "\n" \
39 ".popsection\n" \ 41 ".popsection\n" \
@@ -48,7 +50,7 @@ do { \
48 50
49#define __BUG(__file, __line, __value) \ 51#define __BUG(__file, __line, __value) \
50do { \ 52do { \
51 asm volatile(BUG_INSTR_TYPE #__value); \ 53 asm volatile(BUG_INSTR(__value) "\n"); \
52 unreachable(); \ 54 unreachable(); \
53} while (0) 55} while (0)
54#endif /* CONFIG_DEBUG_BUGVERBOSE */ 56#endif /* CONFIG_DEBUG_BUGVERBOSE */
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
435#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr)) 435#define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
436#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr)) 436#define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
437 437
438/*
439 * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
440 * To do so we must:
441 *
442 * - Clear the SCTLR.C bit to prevent further cache allocations
443 * - Flush the desired level of cache
444 * - Clear the ACTLR "SMP" bit to disable local coherency
445 *
 446 * ... and to do so without any intervening memory access between those steps,
447 * not even to the stack.
448 *
449 * WARNING -- After this has been called:
450 *
451 * - No ldrex/strex (and similar) instructions must be used.
452 * - The CPU is obviously no longer coherent with the other CPUs.
453 * - This is unlikely to work as expected if Linux is running non-secure.
454 *
455 * Note:
456 *
457 * - This is known to apply to several ARMv7 processor implementations,
458 * however some exceptions may exist. Caveat emptor.
459 *
460 * - The clobber list is dictated by the call to v7_flush_dcache_*.
 461 * fp is preserved to the stack explicitly prior to disabling the cache,
 462 * since adding it to the clobber list is incompatible with having
 463 * CONFIG_FRAME_POINTER=y. ip is saved as well in case r12-clobbering
 464 * trampolines are inserted by the linker, and to keep sp 64-bit aligned.
465 */
466#define v7_exit_coherency_flush(level) \
467 asm volatile( \
468 "stmfd sp!, {fp, ip} \n\t" \
469 "mrc p15, 0, r0, c1, c0, 0 @ get SCTLR \n\t" \
470 "bic r0, r0, #"__stringify(CR_C)" \n\t" \
471 "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \
472 "isb \n\t" \
473 "bl v7_flush_dcache_"__stringify(level)" \n\t" \
474 "clrex \n\t" \
475 "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \
476 "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \
477 "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \
478 "isb \n\t" \
479 "dsb \n\t" \
480 "ldmfd sp!, {fp, ip}" \
481 : : : "r0","r1","r2","r3","r4","r5","r6","r7", \
482 "r9","r10","lr","memory" )
483
438#endif 484#endif
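The comment block added above spells out the SCTLR.C / cache-flush / ACTLR "SMP"-bit sequence that v7_exit_coherency_flush() encodes. A hedged sketch of the intended kind of call site, the tail of a CPU power-down path; my_cpu_die() is illustrative, while the louis/all argument maps onto v7_flush_dcache_louis/_all exactly as the macro stringifies it:

	#include <linux/compiler.h>
	#include <asm/cacheflush.h>

	/* Hypothetical last step before this CPU is powered off. */
	static void __noreturn my_cpu_die(void)
	{
		/* Flush only this CPU's private cache levels and leave coherency. */
		v7_exit_coherency_flush(louis);

		/* No ldrex/strex and no coherent data access beyond this point. */
		for (;;)
			asm volatile("wfi" ::: "memory");
	}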
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..df2fbba7efc8 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
223 return ret; 223 return ret;
224} 224}
225 225
226static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
227 unsigned long long old,
228 unsigned long long new)
229{
230 unsigned long long oldval;
231 unsigned long res;
232
233 __asm__ __volatile__(
234"1: ldrexd %1, %H1, [%3]\n"
235" teq %1, %4\n"
236" teqeq %H1, %H4\n"
237" bne 2f\n"
238" strexd %0, %5, %H5, [%3]\n"
239" teq %0, #0\n"
240" bne 1b\n"
241"2:"
242 : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
243 : "r" (ptr), "r" (old), "r" (new)
244 : "cc");
245
246 return oldval;
247}
248
249static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
250 unsigned long long old,
251 unsigned long long new)
252{
253 unsigned long long ret;
254
255 smp_mb();
256 ret = __cmpxchg64(ptr, old, new);
257 smp_mb();
258
259 return ret;
260}
261
226#define cmpxchg_local(ptr,o,n) \ 262#define cmpxchg_local(ptr,o,n) \
227 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \ 263 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), \
228 (unsigned long)(o), \ 264 (unsigned long)(o), \
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
230 sizeof(*(ptr)))) 266 sizeof(*(ptr))))
231 267
232#define cmpxchg64(ptr, o, n) \ 268#define cmpxchg64(ptr, o, n) \
233 ((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr), \ 269 ((__typeof__(*(ptr)))__cmpxchg64_mb((ptr), \
234 atomic64_t, \ 270 (unsigned long long)(o), \
235 counter), \ 271 (unsigned long long)(n)))
236 (unsigned long long)(o), \ 272
237 (unsigned long long)(n))) 273#define cmpxchg64_relaxed(ptr, o, n) \
238 274 ((__typeof__(*(ptr)))__cmpxchg64((ptr), \
239#define cmpxchg64_local(ptr, o, n) \ 275 (unsigned long long)(o), \
240 ((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr), \ 276 (unsigned long long)(n)))
241 local64_t, \ 277
242 a), \ 278#define cmpxchg64_local(ptr, o, n) cmpxchg64_relaxed((ptr), (o), (n))
243 (unsigned long long)(o), \
244 (unsigned long long)(n)))
245 279
246#endif /* __LINUX_ARM_ARCH__ >= 6 */ 280#endif /* __LINUX_ARM_ARCH__ >= 6 */
247 281
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50d..acdde76b39bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
10#define CPUID_TLBTYPE 3 10#define CPUID_TLBTYPE 3
11#define CPUID_MPUIR 4 11#define CPUID_MPUIR 4
12#define CPUID_MPIDR 5 12#define CPUID_MPIDR 5
13#define CPUID_REVIDR 6
13 14
14#ifdef CONFIG_CPU_V7M 15#ifdef CONFIG_CPU_V7M
15#define CPUID_EXT_PFR0 0x40 16#define CPUID_EXT_PFR0 0x40
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..fe3ea776dc34 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
5#include <linux/threads.h> 5#include <linux/threads.h>
6#include <asm/irq.h> 6#include <asm/irq.h>
7 7
8#define NR_IPI 6 8#define NR_IPI 8
9 9
10typedef struct { 10typedef struct {
11 unsigned int __softirq_pending; 11 unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/hardware/coresight.h b/arch/arm/include/asm/hardware/coresight.h
index 0cf7a6b842ff..ad774f37c47c 100644
--- a/arch/arm/include/asm/hardware/coresight.h
+++ b/arch/arm/include/asm/hardware/coresight.h
@@ -24,8 +24,8 @@
24#define TRACER_TIMEOUT 10000 24#define TRACER_TIMEOUT 10000
25 25
26#define etm_writel(t, v, x) \ 26#define etm_writel(t, v, x) \
27 (__raw_writel((v), (t)->etm_regs + (x))) 27 (writel_relaxed((v), (t)->etm_regs + (x)))
28#define etm_readl(t, x) (__raw_readl((t)->etm_regs + (x))) 28#define etm_readl(t, x) (readl_relaxed((t)->etm_regs + (x)))
29 29
30/* CoreSight Management Registers */ 30/* CoreSight Management Registers */
31#define CSMR_LOCKACCESS 0xfb0 31#define CSMR_LOCKACCESS 0xfb0
@@ -142,8 +142,8 @@
142#define ETBFF_TRIGFL BIT(10) 142#define ETBFF_TRIGFL BIT(10)
143 143
144#define etb_writel(t, v, x) \ 144#define etb_writel(t, v, x) \
145 (__raw_writel((v), (t)->etb_regs + (x))) 145 (writel_relaxed((v), (t)->etb_regs + (x)))
146#define etb_readl(t, x) (__raw_readl((t)->etb_regs + (x))) 146#define etb_readl(t, x) (readl_relaxed((t)->etb_regs + (x)))
147 147
148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0) 148#define etm_lock(t) do { etm_writel((t), 0, CSMR_LOCKACCESS); } while (0)
149#define etm_unlock(t) \ 149#define etm_unlock(t) \
diff --git a/arch/arm/include/asm/kgdb.h b/arch/arm/include/asm/kgdb.h
index 48066ce9ea34..0a9d5dd93294 100644
--- a/arch/arm/include/asm/kgdb.h
+++ b/arch/arm/include/asm/kgdb.h
@@ -11,6 +11,7 @@
11#define __ARM_KGDB_H__ 11#define __ARM_KGDB_H__
12 12
13#include <linux/ptrace.h> 13#include <linux/ptrace.h>
14#include <asm/opcodes.h>
14 15
15/* 16/*
16 * GDB assumes that we're a user process being debugged, so 17 * GDB assumes that we're a user process being debugged, so
@@ -41,7 +42,7 @@
41 42
42static inline void arch_kgdb_breakpoint(void) 43static inline void arch_kgdb_breakpoint(void)
43{ 44{
44 asm(".word 0xe7ffdeff"); 45 asm(__inst_arm(0xe7ffdeff));
45} 46}
46 47
47extern void kgdb_handle_bus_error(void); 48extern void kgdb_handle_bus_error(void);
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index 402a2bc6aa68..17a3fa2979e8 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -49,6 +49,7 @@ struct machine_desc {
49 bool (*smp_init)(void); 49 bool (*smp_init)(void);
50 void (*fixup)(struct tag *, char **, 50 void (*fixup)(struct tag *, char **,
51 struct meminfo *); 51 struct meminfo *);
52 void (*init_meminfo)(void);
52 void (*reserve)(void);/* reserve mem blocks */ 53 void (*reserve)(void);/* reserve mem blocks */
53 void (*map_io)(void);/* IO mapping function */ 54 void (*map_io)(void);/* IO mapping function */
54 void (*init_early)(void); 55 void (*init_early)(void);
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b69..608516ebabfe 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -42,6 +42,14 @@ extern void mcpm_entry_point(void);
42void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr); 42void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr);
43 43
44/* 44/*
 45 * This sets an early poke, i.e. a value to be poked into some address
46 * from very early assembly code before the CPU is ungated. The
47 * address must be physical, and if 0 then nothing will happen.
48 */
49void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
50 unsigned long poke_phys_addr, unsigned long poke_val);
51
52/*
45 * CPU/cluster power operations API for higher subsystems to use. 53 * CPU/cluster power operations API for higher subsystems to use.
46 */ 54 */
47 55
@@ -81,10 +89,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
81 * 89 *
82 * This will return if mcpm_platform_register() has not been called 90 * This will return if mcpm_platform_register() has not been called
83 * previously in which case the caller should take appropriate action. 91 * previously in which case the caller should take appropriate action.
92 *
93 * On success, the CPU is not guaranteed to be truly halted until
 94 * mcpm_cpu_power_down_finish() subsequently returns zero for the
95 * specified cpu. Until then, other CPUs should make sure they do not
96 * trash memory the target CPU might be executing/accessing.
84 */ 97 */
85void mcpm_cpu_power_down(void); 98void mcpm_cpu_power_down(void);
86 99
87/** 100/**
101 * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
102 * make sure it is powered off
103 *
104 * @cpu: CPU number within given cluster
105 * @cluster: cluster number for the CPU
106 *
107 * Call this function to ensure that a pending powerdown has taken
108 * effect and the CPU is safely parked before performing non-mcpm
109 * operations that may affect the CPU (such as kexec trashing the
110 * kernel text).
111 *
112 * It is *not* necessary to call this function if you only need to
113 * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
114 * event.
115 *
116 * Do not call this function unless the specified CPU has already
117 * called mcpm_cpu_power_down() or has committed to doing so.
118 *
119 * @return:
120 * - zero if the CPU is in a safely parked state
121 * - nonzero otherwise (e.g., timeout)
122 */
123int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
124
125/**
88 * mcpm_cpu_suspend - bring the calling CPU in a suspended state 126 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
89 * 127 *
90 * @expected_residency: duration in microseconds the CPU is expected 128 * @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +164,7 @@ int mcpm_cpu_powered_up(void);
126struct mcpm_platform_ops { 164struct mcpm_platform_ops {
127 int (*power_up)(unsigned int cpu, unsigned int cluster); 165 int (*power_up)(unsigned int cpu, unsigned int cluster);
128 void (*power_down)(void); 166 void (*power_down)(void);
167 int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
129 void (*suspend)(u64); 168 void (*suspend)(u64);
130 void (*powered_up)(void); 169 void (*powered_up)(void);
131}; 170};
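The documentation added above makes mcpm_cpu_power_down_finish() the way another CPU confirms that a pending power-down has actually completed. A hedged sketch of a CPU-hotplug pair built on it; the my_* hooks and their wiring into smp_operations are illustrative, not part of this patch:

	#include <asm/cputype.h>
	#include <asm/mcpm.h>
	#include <asm/smp_plat.h>

	/* Hypothetical hotplug hooks for an MCPM-backed platform. */
	static int my_cpu_kill(unsigned int l_cpu)
	{
		unsigned int mpidr = cpu_logical_map(l_cpu);
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

		/* Hotplug expects nonzero for "killed"; finish returns 0 once parked. */
		return !mcpm_cpu_power_down_finish(cpu, cluster);
	}

	static void my_cpu_die(unsigned int l_cpu)
	{
		mcpm_cpu_power_down();	/* does not return if the power-down succeeds */
	}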
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e750a938fd3c..4dd21457ef9d 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -172,8 +172,13 @@
172 * so that all we need to do is modify the 8-bit constant field. 172 * so that all we need to do is modify the 8-bit constant field.
173 */ 173 */
174#define __PV_BITS_31_24 0x81000000 174#define __PV_BITS_31_24 0x81000000
175#define __PV_BITS_7_0 0x81
176
177extern u64 __pv_phys_offset;
178extern u64 __pv_offset;
179extern void fixup_pv_table(const void *, unsigned long);
180extern const void *__pv_table_begin, *__pv_table_end;
175 181
176extern unsigned long __pv_phys_offset;
177#define PHYS_OFFSET __pv_phys_offset 182#define PHYS_OFFSET __pv_phys_offset
178 183
179#define __pv_stub(from,to,instr,type) \ 184#define __pv_stub(from,to,instr,type) \
@@ -185,22 +190,58 @@ extern unsigned long __pv_phys_offset;
185 : "=r" (to) \ 190 : "=r" (to) \
186 : "r" (from), "I" (type)) 191 : "r" (from), "I" (type))
187 192
188static inline unsigned long __virt_to_phys(unsigned long x) 193#define __pv_stub_mov_hi(t) \
194 __asm__ volatile("@ __pv_stub_mov\n" \
195 "1: mov %R0, %1\n" \
196 " .pushsection .pv_table,\"a\"\n" \
197 " .long 1b\n" \
198 " .popsection\n" \
199 : "=r" (t) \
200 : "I" (__PV_BITS_7_0))
201
202#define __pv_add_carry_stub(x, y) \
203 __asm__ volatile("@ __pv_add_carry_stub\n" \
204 "1: adds %Q0, %1, %2\n" \
205 " adc %R0, %R0, #0\n" \
206 " .pushsection .pv_table,\"a\"\n" \
207 " .long 1b\n" \
208 " .popsection\n" \
209 : "+r" (y) \
210 : "r" (x), "I" (__PV_BITS_31_24) \
211 : "cc")
212
213static inline phys_addr_t __virt_to_phys(unsigned long x)
189{ 214{
190 unsigned long t; 215 phys_addr_t t;
191 __pv_stub(x, t, "add", __PV_BITS_31_24); 216
217 if (sizeof(phys_addr_t) == 4) {
218 __pv_stub(x, t, "add", __PV_BITS_31_24);
219 } else {
220 __pv_stub_mov_hi(t);
221 __pv_add_carry_stub(x, t);
222 }
192 return t; 223 return t;
193} 224}
194 225
195static inline unsigned long __phys_to_virt(unsigned long x) 226static inline unsigned long __phys_to_virt(phys_addr_t x)
196{ 227{
197 unsigned long t; 228 unsigned long t;
198 __pv_stub(x, t, "sub", __PV_BITS_31_24); 229 __pv_stub(x, t, "sub", __PV_BITS_31_24);
199 return t; 230 return t;
200} 231}
232
201#else 233#else
202#define __virt_to_phys(x) ((x) - PAGE_OFFSET + PHYS_OFFSET) 234
203#define __phys_to_virt(x) ((x) - PHYS_OFFSET + PAGE_OFFSET) 235static inline phys_addr_t __virt_to_phys(unsigned long x)
236{
237 return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
238}
239
240static inline unsigned long __phys_to_virt(phys_addr_t x)
241{
242 return x - PHYS_OFFSET + PAGE_OFFSET;
243}
244
204#endif 245#endif
205#endif 246#endif
206#endif /* __ASSEMBLY__ */ 247#endif /* __ASSEMBLY__ */
@@ -238,16 +279,33 @@ static inline phys_addr_t virt_to_phys(const volatile void *x)
238 279
239static inline void *phys_to_virt(phys_addr_t x) 280static inline void *phys_to_virt(phys_addr_t x)
240{ 281{
241 return (void *)(__phys_to_virt((unsigned long)(x))); 282 return (void *)__phys_to_virt(x);
242} 283}
243 284
244/* 285/*
245 * Drivers should NOT use these either. 286 * Drivers should NOT use these either.
246 */ 287 */
247#define __pa(x) __virt_to_phys((unsigned long)(x)) 288#define __pa(x) __virt_to_phys((unsigned long)(x))
248#define __va(x) ((void *)__phys_to_virt((unsigned long)(x))) 289#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
249#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) 290#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
250 291
292extern phys_addr_t (*arch_virt_to_idmap)(unsigned long x);
293
294/*
295 * These are for systems that have a hardware interconnect supported alias of
296 * physical memory for idmap purposes. Most cases should leave these
297 * untouched.
298 */
299static inline phys_addr_t __virt_to_idmap(unsigned long x)
300{
301 if (arch_virt_to_idmap)
302 return arch_virt_to_idmap(x);
303 else
304 return __virt_to_phys(x);
305}
306
307#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
308
251/* 309/*
252 * Virtual <-> DMA view memory address translations 310 * Virtual <-> DMA view memory address translations
253 * Again, these are *only* valid on the kernel direct mapped RAM 311 * Again, these are *only* valid on the kernel direct mapped RAM
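With phys_addr_t possibly 64-bit, __virt_to_phys() above now patches either a single add or a mov plus adds/adc stub pair, and virt_to_idmap() gives platforms a hook for an aliased view of RAM. A small sketch of the caller-visible conversions; the buffer and the print are illustrative:

	#include <linux/printk.h>
	#include <asm/memory.h>

	/* Hypothetical dump of the translations for a linear-map buffer;
	 * phys_addr_t may be 64-bit on an LPAE kernel. */
	static void describe_buffer(void *buf)
	{
		phys_addr_t pa = virt_to_phys(buf);	/* runtime-patched stub(s) */
		void *va = phys_to_virt(pa);		/* back into the linear map */
		phys_addr_t ia = virt_to_idmap(buf);	/* interconnect alias, if any */

		pr_info("va=%p pa=%pa idmap=%pa\n", va, &pa, &ia);
	}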
diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h
index 6f18da09668b..64fd15159b7d 100644
--- a/arch/arm/include/asm/mmu.h
+++ b/arch/arm/include/asm/mmu.h
@@ -16,7 +16,7 @@ typedef struct {
16#ifdef CONFIG_CPU_HAS_ASID 16#ifdef CONFIG_CPU_HAS_ASID
17#define ASID_BITS 8 17#define ASID_BITS 8
18#define ASID_MASK ((~0ULL) << ASID_BITS) 18#define ASID_MASK ((~0ULL) << ASID_BITS)
19#define ASID(mm) ((mm)->context.id.counter & ~ASID_MASK) 19#define ASID(mm) ((unsigned int)((mm)->context.id.counter & ~ASID_MASK))
20#else 20#else
21#define ASID(mm) (0) 21#define ASID(mm) (0)
22#endif 22#endif
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..86a659a19526 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
181 181
182#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext) 182#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
183 183
184/*
185 * We don't have huge page support for short descriptors, for the moment
186 * define empty stubs for use by pin_page_for_write.
187 */
188#define pmd_hugewillfault(pmd) (0)
189#define pmd_thp_or_huge(pmd) (0)
190
184#endif /* __ASSEMBLY__ */ 191#endif /* __ASSEMBLY__ */
185 192
186#endif /* _ASM_PGTABLE_2LEVEL_H */ 193#endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..39c54cfa03e9 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
206#define __HAVE_ARCH_PMD_WRITE 206#define __HAVE_ARCH_PMD_WRITE
207#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY)) 207#define pmd_write(pmd) (!(pmd_val(pmd) & PMD_SECT_RDONLY))
208 208
209#define pmd_hugewillfault(pmd) (!pmd_young(pmd) || !pmd_write(pmd))
210#define pmd_thp_or_huge(pmd) (pmd_huge(pmd) || pmd_trans_huge(pmd))
211
209#ifdef CONFIG_TRANSPARENT_HUGEPAGE 212#ifdef CONFIG_TRANSPARENT_HUGEPAGE
210#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT)) 213#define pmd_trans_huge(pmd) (pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
211#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING) 214#define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 413f3876341c..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -22,6 +22,7 @@
22#include <asm/hw_breakpoint.h> 22#include <asm/hw_breakpoint.h>
23#include <asm/ptrace.h> 23#include <asm/ptrace.h>
24#include <asm/types.h> 24#include <asm/types.h>
25#include <asm/unified.h>
25 26
26#ifdef __KERNEL__ 27#ifdef __KERNEL__
27#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \ 28#define STACK_TOP ((current->personality & ADDR_LIMIT_32BIT) ? \
@@ -87,6 +88,17 @@ unsigned long get_wchan(struct task_struct *p);
87#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc 88#define KSTK_EIP(tsk) task_pt_regs(tsk)->ARM_pc
88#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp 89#define KSTK_ESP(tsk) task_pt_regs(tsk)->ARM_sp
89 90
91#ifdef CONFIG_SMP
92#define __ALT_SMP_ASM(smp, up) \
93 "9998: " smp "\n" \
94 " .pushsection \".alt.smp.init\", \"a\"\n" \
95 " .long 9998b\n" \
96 " " up "\n" \
97 " .popsection\n"
98#else
99#define __ALT_SMP_ASM(smp, up) up
100#endif
101
90/* 102/*
91 * Prefetching support - only ARMv5. 103 * Prefetching support - only ARMv5.
92 */ 104 */
@@ -97,17 +109,22 @@ static inline void prefetch(const void *ptr)
97{ 109{
98 __asm__ __volatile__( 110 __asm__ __volatile__(
99 "pld\t%a0" 111 "pld\t%a0"
100 : 112 :: "p" (ptr));
101 : "p" (ptr)
102 : "cc");
103} 113}
104 114
115#if __LINUX_ARM_ARCH__ >= 7 && defined(CONFIG_SMP)
105#define ARCH_HAS_PREFETCHW 116#define ARCH_HAS_PREFETCHW
106#define prefetchw(ptr) prefetch(ptr) 117static inline void prefetchw(const void *ptr)
107 118{
108#define ARCH_HAS_SPINLOCK_PREFETCH 119 __asm__ __volatile__(
109#define spin_lock_prefetch(x) do { } while (0) 120 ".arch_extension mp\n"
110 121 __ALT_SMP_ASM(
122 WASM(pldw) "\t%a0",
123 WASM(pld) "\t%a0"
124 )
125 :: "p" (ptr));
126}
127#endif
111#endif 128#endif
112 129
113#define HAVE_ARCH_PICK_MMAP_LAYOUT 130#define HAVE_ARCH_PICK_MMAP_LAYOUT
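prefetchw() above now emits PLDW on ARMv7 SMP kernels, with a plain PLD fallback patched in on UP via the .alt.smp.init table, rather than simply aliasing prefetch(). A minimal example of the sort of read-modify-write loop it is meant for; the function is illustrative:

	#include <linux/prefetch.h>

	/* Hypothetical in-place scaling loop: hint that upcoming cache lines
	 * will be written so they are fetched in a unique (writeable) state. */
	static void scale_values(unsigned int *vals, int n, unsigned int k)
	{
		int i;

		for (i = 0; i < n; i++) {
			if ((i & 15) == 0 && i + 16 < n)
				prefetchw(&vals[i + 16]);
			vals[i] *= k;
		}
	}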
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..8d6a089dfb76 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
49#define bank_phys_end(bank) ((bank)->start + (bank)->size) 49#define bank_phys_end(bank) ((bank)->start + (bank)->size)
50#define bank_phys_size(bank) (bank)->size 50#define bank_phys_size(bank) (bank)->size
51 51
52extern int arm_add_memory(phys_addr_t start, phys_addr_t size); 52extern int arm_add_memory(u64 start, u64 size);
53extern void early_print(const char *str, ...); 53extern void early_print(const char *str, ...);
54extern void dump_machine_table(void); 54extern void dump_machine_table(void);
55 55
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index a8cae71caceb..22a3b9b5d4a1 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -84,6 +84,8 @@ extern void arch_send_call_function_single_ipi(int cpu);
84extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); 84extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
85extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask); 85extern void arch_send_wakeup_ipi_mask(const struct cpumask *mask);
86 86
87extern int register_ipi_completion(struct completion *completion, int cpu);
88
87struct smp_operations { 89struct smp_operations {
88#ifdef CONFIG_SMP 90#ifdef CONFIG_SMP
89 /* 91 /*
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ef3c6072aa45 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -5,21 +5,13 @@
5#error SMP not supported on pre-ARMv6 CPUs 5#error SMP not supported on pre-ARMv6 CPUs
6#endif 6#endif
7 7
8#include <asm/processor.h> 8#include <linux/prefetch.h>
9 9
10/* 10/*
11 * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K 11 * sev and wfe are ARMv6K extensions. Uniprocessor ARMv6 may not have the K
12 * extensions, so when running on UP, we have to patch these instructions away. 12 * extensions, so when running on UP, we have to patch these instructions away.
13 */ 13 */
14#define ALT_SMP(smp, up) \
15 "9998: " smp "\n" \
16 " .pushsection \".alt.smp.init\", \"a\"\n" \
17 " .long 9998b\n" \
18 " " up "\n" \
19 " .popsection\n"
20
21#ifdef CONFIG_THUMB2_KERNEL 14#ifdef CONFIG_THUMB2_KERNEL
22#define SEV ALT_SMP("sev.w", "nop.w")
23/* 15/*
24 * For Thumb-2, special care is needed to ensure that the conditional WFE 16 * For Thumb-2, special care is needed to ensure that the conditional WFE
25 * instruction really does assemble to exactly 4 bytes (as required by 17 * instruction really does assemble to exactly 4 bytes (as required by
@@ -31,17 +23,18 @@
31 * the assembler won't change IT instructions which are explicitly present 23 * the assembler won't change IT instructions which are explicitly present
32 * in the input. 24 * in the input.
33 */ 25 */
34#define WFE(cond) ALT_SMP( \ 26#define WFE(cond) __ALT_SMP_ASM( \
35 "it " cond "\n\t" \ 27 "it " cond "\n\t" \
36 "wfe" cond ".n", \ 28 "wfe" cond ".n", \
37 \ 29 \
38 "nop.w" \ 30 "nop.w" \
39) 31)
40#else 32#else
41#define SEV ALT_SMP("sev", "nop") 33#define WFE(cond) __ALT_SMP_ASM("wfe" cond, "nop")
42#define WFE(cond) ALT_SMP("wfe" cond, "nop")
43#endif 34#endif
44 35
36#define SEV __ALT_SMP_ASM(WASM(sev), WASM(nop))
37
45static inline void dsb_sev(void) 38static inline void dsb_sev(void)
46{ 39{
47#if __LINUX_ARM_ARCH__ >= 7 40#if __LINUX_ARM_ARCH__ >= 7
@@ -77,6 +70,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
77 u32 newval; 70 u32 newval;
78 arch_spinlock_t lockval; 71 arch_spinlock_t lockval;
79 72
73 prefetchw(&lock->slock);
80 __asm__ __volatile__( 74 __asm__ __volatile__(
81"1: ldrex %0, [%3]\n" 75"1: ldrex %0, [%3]\n"
82" add %1, %0, %4\n" 76" add %1, %0, %4\n"
@@ -100,6 +94,7 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
100 unsigned long contended, res; 94 unsigned long contended, res;
101 u32 slock; 95 u32 slock;
102 96
97 prefetchw(&lock->slock);
103 do { 98 do {
104 __asm__ __volatile__( 99 __asm__ __volatile__(
105 " ldrex %0, [%3]\n" 100 " ldrex %0, [%3]\n"
@@ -127,10 +122,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
127 dsb_sev(); 122 dsb_sev();
128} 123}
129 124
125static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
126{
127 return lock.tickets.owner == lock.tickets.next;
128}
129
130static inline int arch_spin_is_locked(arch_spinlock_t *lock) 130static inline int arch_spin_is_locked(arch_spinlock_t *lock)
131{ 131{
132 struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets); 132 return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
133 return tickets.owner != tickets.next;
134} 133}
135 134
136static inline int arch_spin_is_contended(arch_spinlock_t *lock) 135static inline int arch_spin_is_contended(arch_spinlock_t *lock)
@@ -152,6 +151,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
152{ 151{
153 unsigned long tmp; 152 unsigned long tmp;
154 153
154 prefetchw(&rw->lock);
155 __asm__ __volatile__( 155 __asm__ __volatile__(
156"1: ldrex %0, [%1]\n" 156"1: ldrex %0, [%1]\n"
157" teq %0, #0\n" 157" teq %0, #0\n"
@@ -170,6 +170,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
170{ 170{
171 unsigned long contended, res; 171 unsigned long contended, res;
172 172
173 prefetchw(&rw->lock);
173 do { 174 do {
174 __asm__ __volatile__( 175 __asm__ __volatile__(
175 " ldrex %0, [%2]\n" 176 " ldrex %0, [%2]\n"
@@ -203,7 +204,7 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
203} 204}
204 205
205/* write_can_lock - would write_trylock() succeed? */ 206/* write_can_lock - would write_trylock() succeed? */
206#define arch_write_can_lock(x) ((x)->lock == 0) 207#define arch_write_can_lock(x) (ACCESS_ONCE((x)->lock) == 0)
207 208
208/* 209/*
209 * Read locks are a bit more hairy: 210 * Read locks are a bit more hairy:
@@ -221,6 +222,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
221{ 222{
222 unsigned long tmp, tmp2; 223 unsigned long tmp, tmp2;
223 224
225 prefetchw(&rw->lock);
224 __asm__ __volatile__( 226 __asm__ __volatile__(
225"1: ldrex %0, [%2]\n" 227"1: ldrex %0, [%2]\n"
226" adds %0, %0, #1\n" 228" adds %0, %0, #1\n"
@@ -241,6 +243,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
241 243
242 smp_mb(); 244 smp_mb();
243 245
246 prefetchw(&rw->lock);
244 __asm__ __volatile__( 247 __asm__ __volatile__(
245"1: ldrex %0, [%2]\n" 248"1: ldrex %0, [%2]\n"
246" sub %0, %0, #1\n" 249" sub %0, %0, #1\n"
@@ -259,6 +262,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
259{ 262{
260 unsigned long contended, res; 263 unsigned long contended, res;
261 264
265 prefetchw(&rw->lock);
262 do { 266 do {
263 __asm__ __volatile__( 267 __asm__ __volatile__(
264 " ldrex %0, [%2]\n" 268 " ldrex %0, [%2]\n"
@@ -280,7 +284,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
280} 284}
281 285
282/* read_can_lock - would read_trylock() succeed? */ 286/* read_can_lock - would read_trylock() succeed? */
283#define arch_read_can_lock(x) ((x)->lock < 0x80000000) 287#define arch_read_can_lock(x) (ACCESS_ONCE((x)->lock) < 0x80000000)
284 288
285#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 289#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
286#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 290#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
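arch_spin_is_locked() above is now expressed through the new arch_spin_value_unlocked(), which inspects a lock value rather than a lock in memory. A brief illustration of why that distinction matters; the helper is illustrative, and generic code such as lockref applies the same idea to a snapshotted lock word:

	#include <linux/types.h>
	#include <linux/spinlock.h>

	/* Hypothetical check on a copied ticket-lock value: no access to the
	 * live lock is needed, only a comparison of owner and next. */
	static inline bool ticket_snapshot_is_free(arch_spinlock_t snap)
	{
		return arch_spin_value_unlocked(snap);
	}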
diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h
index b262d2f8b478..47663fcb10ad 100644
--- a/arch/arm/include/asm/spinlock_types.h
+++ b/arch/arm/include/asm/spinlock_types.h
@@ -25,7 +25,7 @@ typedef struct {
25#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } 25#define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } }
26 26
27typedef struct { 27typedef struct {
28 volatile unsigned int lock; 28 u32 lock;
29} arch_rwlock_t; 29} arch_rwlock_t;
30 30
31#define __ARCH_RW_LOCK_UNLOCKED { 0 } 31#define __ARCH_RW_LOCK_UNLOCKED { 0 }
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040c..def9e570199f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
560 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero)); 560 asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
561} 561}
562 562
563#include <asm/cputype.h>
564#ifdef CONFIG_ARM_ERRATA_798181
565static inline int erratum_a15_798181(void)
566{
567 unsigned int midr = read_cpuid_id();
568
569 /* Cortex-A15 r0p0..r3p2 affected */
570 if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
571 return 0;
572 return 1;
573}
574
575static inline void dummy_flush_tlb_a15_erratum(void)
576{
577 /*
578 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
579 */
580 asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
581 dsb(ish);
582}
583#else
584static inline int erratum_a15_798181(void)
585{
586 return 0;
587}
588
589static inline void dummy_flush_tlb_a15_erratum(void)
590{
591}
592#endif
593
594/* 563/*
595 * flush_pmd_entry 564 * flush_pmd_entry
596 * 565 *
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
697 666
698#endif 667#endif
699 668
669#ifndef __ASSEMBLY__
670#ifdef CONFIG_ARM_ERRATA_798181
671extern void erratum_a15_798181_init(void);
672#else
673static inline void erratum_a15_798181_init(void) {}
674#endif
675extern bool (*erratum_a15_798181_handler)(void);
676
677static inline bool erratum_a15_798181(void)
678{
679 if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
680 erratum_a15_798181_handler))
681 return erratum_a15_798181_handler();
682 return false;
683}
684#endif
685
700#endif 686#endif
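The erratum 798181 check above becomes a runtime-installed handler behind erratum_a15_798181(), replacing the inline MIDR test. A hedged sketch of how a TLB-maintenance path might consult it; the wrapper is illustrative, and the real workaround broadcasts a dummy TLBIMVAIS to the other CPUs:

	#include <linux/mm_types.h>
	#include <asm/tlbflush.h>

	static void my_flush_tlb_mm(struct mm_struct *mm)
	{
		if (erratum_a15_798181()) {
			/* take the broadcast workaround path on affected A15s */
		}
		local_flush_tlb_mm(mm);
	}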
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index f5989f46b4d2..b88beaba6b4a 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -38,6 +38,8 @@
38#ifdef __ASSEMBLY__ 38#ifdef __ASSEMBLY__
39#define W(instr) instr.w 39#define W(instr) instr.w
40#define BSYM(sym) sym + 1 40#define BSYM(sym) sym + 1
41#else
42#define WASM(instr) #instr ".w"
41#endif 43#endif
42 44
43#else /* !CONFIG_THUMB2_KERNEL */ 45#else /* !CONFIG_THUMB2_KERNEL */
@@ -50,6 +52,8 @@
50#ifdef __ASSEMBLY__ 52#ifdef __ASSEMBLY__
51#define W(instr) instr 53#define W(instr) instr
52#define BSYM(sym) sym 54#define BSYM(sym) sym
55#else
56#define WASM(instr) #instr
53#endif 57#endif
54 58
55#endif /* CONFIG_THUMB2_KERNEL */ 59#endif /* CONFIG_THUMB2_KERNEL */
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 000000000000..2265a199280c
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
1/*
2 * Copyright (C) 2013 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#define UARTn_CMD 0x000c
11#define UARTn_CMD_TXEN 0x0004
12
13#define UARTn_STATUS 0x0010
14#define UARTn_STATUS_TXC 0x0020
15#define UARTn_STATUS_TXBL 0x0040
16
17#define UARTn_TXDATA 0x0034
18
19 .macro addruart, rx, tmp
20 ldr \rx, =(CONFIG_DEBUG_UART_PHYS)
21
22 /*
 23 * Enable TX. The driver might disable it to save energy. We
 24 * don't bother disabling it again at the end, since power
 25 * consumption isn't that important while debugging.
26 */
27 ldr \tmp, =(UARTn_CMD_TXEN)
28 str \tmp, [\rx, #UARTn_CMD]
29 .endm
30
31 .macro senduart,rd,rx
32 strb \rd, [\rx, #UARTn_TXDATA]
33 .endm
34
35 .macro waituart,rd,rx
361001: ldr \rd, [\rx, #UARTn_STATUS]
37 tst \rd, #UARTn_STATUS_TXBL
38 beq 1001b
39 .endm
40
41 .macro busyuart,rd,rx
 421001: ldr \rd, [\rx, #UARTn_STATUS]
43 tst \rd, #UARTn_STATUS_TXC
44 bne 1001b
45 .endm
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470e..9d653d475903 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
46#define MSM_DEBUG_UART_PHYS 0x16440000 46#define MSM_DEBUG_UART_PHYS 0x16440000
47#endif 47#endif
48 48
49#ifdef CONFIG_DEBUG_MSM8974_UART
50#define MSM_DEBUG_UART_BASE 0xFA71E000
51#define MSM_DEBUG_UART_PHYS 0xF991E000
52#endif
53
49 .macro addruart, rp, rv, tmp 54 .macro addruart, rp, rv, tmp
50#ifdef MSM_DEBUG_UART_PHYS 55#ifdef MSM_DEBUG_UART_PHYS
51 ldr \rp, =MSM_DEBUG_UART_PHYS 56 ldr \rp, =MSM_DEBUG_UART_PHYS
diff --git a/arch/arm/include/debug/pl01x.S b/arch/arm/include/debug/pl01x.S
index 37c6895b87e6..92ef808a2337 100644
--- a/arch/arm/include/debug/pl01x.S
+++ b/arch/arm/include/debug/pl01x.S
@@ -25,12 +25,14 @@
25 25
26 .macro waituart,rd,rx 26 .macro waituart,rd,rx
271001: ldr \rd, [\rx, #UART01x_FR] 271001: ldr \rd, [\rx, #UART01x_FR]
28 ARM_BE8( rev \rd, \rd )
28 tst \rd, #UART01x_FR_TXFF 29 tst \rd, #UART01x_FR_TXFF
29 bne 1001b 30 bne 1001b
30 .endm 31 .endm
31 32
32 .macro busyuart,rd,rx 33 .macro busyuart,rd,rx
331001: ldr \rd, [\rx, #UART01x_FR] 341001: ldr \rd, [\rx, #UART01x_FR]
35 ARM_BE8( rev \rd, \rd )
34 tst \rd, #UART01x_FR_BUSY 36 tst \rd, #UART01x_FR_BUSY
35 bne 1001b 37 bne 1001b
36 .endm 38 .endm
diff --git a/arch/arm/include/uapi/asm/Kbuild b/arch/arm/include/uapi/asm/Kbuild
index 18d76fd5a2af..70a1c9da30ca 100644
--- a/arch/arm/include/uapi/asm/Kbuild
+++ b/arch/arm/include/uapi/asm/Kbuild
@@ -7,6 +7,7 @@ header-y += hwcap.h
7header-y += ioctls.h 7header-y += ioctls.h
8header-y += kvm_para.h 8header-y += kvm_para.h
9header-y += mman.h 9header-y += mman.h
10header-y += perf_regs.h
10header-y += posix_types.h 11header-y += posix_types.h
11header-y += ptrace.h 12header-y += ptrace.h
12header-y += setup.h 13header-y += setup.h
diff --git a/arch/arm/include/uapi/asm/perf_regs.h b/arch/arm/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000000..ce59448458b2
--- /dev/null
+++ b/arch/arm/include/uapi/asm/perf_regs.h
@@ -0,0 +1,23 @@
1#ifndef _ASM_ARM_PERF_REGS_H
2#define _ASM_ARM_PERF_REGS_H
3
4enum perf_event_arm_regs {
5 PERF_REG_ARM_R0,
6 PERF_REG_ARM_R1,
7 PERF_REG_ARM_R2,
8 PERF_REG_ARM_R3,
9 PERF_REG_ARM_R4,
10 PERF_REG_ARM_R5,
11 PERF_REG_ARM_R6,
12 PERF_REG_ARM_R7,
13 PERF_REG_ARM_R8,
14 PERF_REG_ARM_R9,
15 PERF_REG_ARM_R10,
16 PERF_REG_ARM_FP,
17 PERF_REG_ARM_IP,
18 PERF_REG_ARM_SP,
19 PERF_REG_ARM_LR,
20 PERF_REG_ARM_PC,
21 PERF_REG_ARM_MAX,
22};
23#endif /* _ASM_ARM_PERF_REGS_H */
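The new UAPI enum gives perf a stable index for each ARM core register. A hedged sketch of the kernel-side mapping such an enum typically feeds; my_perf_reg_value() mirrors what an arch perf_regs.c would do but is illustrative, not part of this header:

	#include <linux/ptrace.h>
	#include <asm/perf_regs.h>

	/* PERF_REG_ARM_R0..PERF_REG_ARM_PC index straight into pt_regs->uregs[],
	 * so the lookup is a bounds check plus an array access. */
	static unsigned long my_perf_reg_value(struct pt_regs *regs, int idx)
	{
		if (idx < PERF_REG_ARM_R0 || idx >= PERF_REG_ARM_MAX)
			return 0;

		return regs->uregs[idx];
	}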