Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/atomic.h         | 76
-rw-r--r--  arch/arm/include/asm/cacheflush.h     | 46
-rw-r--r--  arch/arm/include/asm/cmpxchg.h        | 58
-rw-r--r--  arch/arm/include/asm/cputype.h        |  1
-rw-r--r--  arch/arm/include/asm/hardirq.h        |  2
-rw-r--r--  arch/arm/include/asm/mcpm.h           | 31
-rw-r--r--  arch/arm/include/asm/pgtable-2level.h |  7
-rw-r--r--  arch/arm/include/asm/pgtable-3level.h |  3
-rw-r--r--  arch/arm/include/asm/setup.h          |  2
-rw-r--r--  arch/arm/include/asm/spinlock.h       |  8
-rw-r--r--  arch/arm/include/asm/tlbflush.h       | 48
-rw-r--r--  arch/arm/include/debug/efm32.S        | 45
-rw-r--r--  arch/arm/include/debug/msm.S          |  5
13 files changed, 236 insertions(+), 96 deletions(-)
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d39327..f8a4336ed8fc 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -114,7 +114,8 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 
 static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 {
-	unsigned long oldval, res;
+	int oldval;
+	unsigned long res;
 
 	smp_mb();
 
@@ -134,21 +135,6 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 	return oldval;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long tmp, tmp2;
-
-	__asm__ __volatile__("@ atomic_clear_mask\n"
-"1:	ldrex	%0, [%3]\n"
-"	bic	%0, %0, %4\n"
-"	strex	%1, %0, [%3]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (tmp), "=&r" (tmp2), "+Qo" (*addr)
-	: "r" (addr), "Ir" (mask)
-	: "cc");
-}
-
 #else /* ARM_ARCH_6 */
 
 #ifdef CONFIG_SMP
@@ -197,15 +183,6 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 	return ret;
 }
 
-static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
-{
-	unsigned long flags;
-
-	raw_local_irq_save(flags);
-	*addr &= ~mask;
-	raw_local_irq_restore(flags);
-}
-
 #endif /* __LINUX_ARM_ARCH__ */
 
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
@@ -238,15 +215,15 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 
 #ifndef CONFIG_GENERIC_ATOMIC64
 typedef struct {
-	u64 __aligned(8) counter;
+	long long counter;
 } atomic64_t;
 
 #define ATOMIC64_INIT(i) { (i) }
 
 #ifdef CONFIG_ARM_LPAE
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrd	%0, %H0, [%1]"
@@ -257,7 +234,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
 	__asm__ __volatile__("@ atomic64_set\n"
 "	strd	%2, %H2, [%1]"
@@ -266,9 +243,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 	);
 }
 #else
-static inline u64 atomic64_read(const atomic64_t *v)
+static inline long long atomic64_read(const atomic64_t *v)
 {
-	u64 result;
+	long long result;
 
 	__asm__ __volatile__("@ atomic64_read\n"
 "	ldrexd	%0, %H0, [%1]"
@@ -279,9 +256,9 @@ static inline u64 atomic64_read(const atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_set(atomic64_t *v, u64 i)
+static inline void atomic64_set(atomic64_t *v, long long i)
 {
-	u64 tmp;
+	long long tmp;
 
 	__asm__ __volatile__("@ atomic64_set\n"
 "1:	ldrexd	%0, %H0, [%2]\n"
@@ -294,9 +271,9 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 }
 #endif
 
-static inline void atomic64_add(u64 i, atomic64_t *v)
+static inline void atomic64_add(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_add\n"
@@ -311,9 +288,9 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
+static inline long long atomic64_add_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -334,9 +311,9 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline void atomic64_sub(u64 i, atomic64_t *v)
+static inline void atomic64_sub(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	__asm__ __volatile__("@ atomic64_sub\n"
@@ -351,9 +328,9 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 	: "cc");
 }
 
-static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
+static inline long long atomic64_sub_return(long long i, atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -374,9 +351,10 @@ static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 	return result;
 }
 
-static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
+static inline long long atomic64_cmpxchg(atomic64_t *ptr, long long old,
+					  long long new)
 {
-	u64 oldval;
+	long long oldval;
 	unsigned long res;
 
 	smp_mb();
@@ -398,9 +376,9 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 	return oldval;
 }
 
-static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
+static inline long long atomic64_xchg(atomic64_t *ptr, long long new)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -419,9 +397,9 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 	return result;
 }
 
-static inline u64 atomic64_dec_if_positive(atomic64_t *v)
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
 {
-	u64 result;
+	long long result;
 	unsigned long tmp;
 
 	smp_mb();
@@ -445,9 +423,9 @@ static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 	return result;
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
+static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
 {
-	u64 val;
+	long long val;
 	unsigned long tmp;
 	int ret = 1;
 
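The atomic.h changes above switch the 64-bit atomics from u64 to the signed long long used by the generic atomic64 API, so negative counter values behave naturally. A minimal sketch of what that means for callers (atomic64_demo is a hypothetical function, not part of the patch):

	static atomic64_t sample = ATOMIC64_INIT(4);

	static void atomic64_demo(void)
	{
		long long v;

		v = atomic64_sub_return(10, &sample);	/* v == -6 */

		/*
		 * With the old u64 prototypes the same bits would be seen
		 * as a huge unsigned value; a test such as "v < 0" only
		 * works with the signed API.
		 */
		if (v < 0)
			atomic64_set(&sample, 0);
	}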
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 15f2d5bf8875..ee753f1749cd 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -435,4 +435,50 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size)
 #define sync_cache_w(ptr) __sync_cache_range_w(ptr, sizeof *(ptr))
 #define sync_cache_r(ptr) __sync_cache_range_r(ptr, sizeof *(ptr))
 
+/*
+ * Disabling cache access for one CPU in an ARMv7 SMP system is tricky.
+ * To do so we must:
+ *
+ * - Clear the SCTLR.C bit to prevent further cache allocations
+ * - Flush the desired level of cache
+ * - Clear the ACTLR "SMP" bit to disable local coherency
+ *
+ * ... and so without any intervening memory access in between those steps,
+ * not even to the stack.
+ *
+ * WARNING -- After this has been called:
+ *
+ * - No ldrex/strex (and similar) instructions must be used.
+ * - The CPU is obviously no longer coherent with the other CPUs.
+ * - This is unlikely to work as expected if Linux is running non-secure.
+ *
+ * Note:
+ *
+ * - This is known to apply to several ARMv7 processor implementations,
+ *   however some exceptions may exist. Caveat emptor.
+ *
+ * - The clobber list is dictated by the call to v7_flush_dcache_*.
+ *   fp is preserved to the stack explicitly prior to disabling the cache
+ *   since adding it to the clobber list is incompatible with having
+ *   CONFIG_FRAME_POINTER=y. ip is saved as well if ever r12-clobbering
+ *   trampolines are inserted by the linker and to keep sp 64-bit aligned.
+ */
+#define v7_exit_coherency_flush(level) \
+	asm volatile( \
+	"stmfd	sp!, {fp, ip} \n\t" \
+	"mrc	p15, 0, r0, c1, c0, 0	@ get SCTLR \n\t" \
+	"bic	r0, r0, #"__stringify(CR_C)" \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 0	@ set SCTLR \n\t" \
+	"isb	\n\t" \
+	"bl	v7_flush_dcache_"__stringify(level)" \n\t" \
+	"clrex	\n\t" \
+	"mrc	p15, 0, r0, c1, c0, 1	@ get ACTLR \n\t" \
+	"bic	r0, r0, #(1 << 6)	@ disable local coherency \n\t" \
+	"mcr	p15, 0, r0, c1, c0, 1	@ set ACTLR \n\t" \
+	"isb	\n\t" \
+	"dsb	\n\t" \
+	"ldmfd	sp!, {fp, ip}" \
+	: : : "r0","r1","r2","r3","r4","r5","r6","r7", \
+	      "r9","r10","lr","memory" )
+
 #endif
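A sketch of how a platform power-down path might invoke the new macro; sample_cpu_power_down is hypothetical, and the louis argument expands to a call to v7_flush_dcache_louis (flush to the Level of Unification Inner Shareable):

	static void sample_cpu_power_down(void)
	{
		/* ... mark this CPU as down, migrate/disable its IRQs ... */

		/*
		 * Flush the cache levels this CPU owns and drop out of SMP
		 * coherency.  Being a single asm block, no stack access can
		 * sneak in between clearing SCTLR.C and clearing ACTLR.SMP.
		 */
		v7_exit_coherency_flush(louis);

		/* ... wfi, or signal the external power controller ... */
	}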
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c10540d..df2fbba7efc8 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -223,6 +223,42 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 	return ret;
 }
 
+static inline unsigned long long __cmpxchg64(unsigned long long *ptr,
+					     unsigned long long old,
+					     unsigned long long new)
+{
+	unsigned long long oldval;
+	unsigned long res;
+
+	__asm__ __volatile__(
+"1:	ldrexd		%1, %H1, [%3]\n"
+"	teq		%1, %4\n"
+"	teqeq		%H1, %H4\n"
+"	bne		2f\n"
+"	strexd		%0, %5, %H5, [%3]\n"
+"	teq		%0, #0\n"
+"	bne		1b\n"
+"2:"
+	: "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
+	: "r" (ptr), "r" (old), "r" (new)
+	: "cc");
+
+	return oldval;
+}
+
+static inline unsigned long long __cmpxchg64_mb(unsigned long long *ptr,
+						unsigned long long old,
+						unsigned long long new)
+{
+	unsigned long long ret;
+
+	smp_mb();
+	ret = __cmpxchg64(ptr, old, new);
+	smp_mb();
+
+	return ret;
+}
+
 #define cmpxchg_local(ptr,o,n)						\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr),			\
 				       (unsigned long)(o),		\
@@ -230,18 +266,16 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 				       sizeof(*(ptr))))
 
 #define cmpxchg64(ptr, o, n)						\
-	((__typeof__(*(ptr)))atomic64_cmpxchg(container_of((ptr),	\
-						atomic64_t,		\
-						counter),		\
-					      (unsigned long long)(o),	\
-					      (unsigned long long)(n)))
-
-#define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))local64_cmpxchg(container_of((ptr),	\
-						local64_t,		\
-						a),			\
-					     (unsigned long long)(o),	\
-					     (unsigned long long)(n)))
+	((__typeof__(*(ptr)))__cmpxchg64_mb((ptr),			\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_relaxed(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg64((ptr),				\
+					(unsigned long long)(o),	\
+					(unsigned long long)(n)))
+
+#define cmpxchg64_local(ptr, o, n)	cmpxchg64_relaxed((ptr), (o), (n))
 
 #endif	/* __LINUX_ARM_ARCH__ >= 6 */
 
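With __cmpxchg64 in place, cmpxchg64() works on any naturally aligned 64-bit variable instead of taking the old container_of() detour through atomic64_t, and cmpxchg64_relaxed() provides the same operation without the smp_mb() pair. A hypothetical retry loop using it:

	static u64 seq;		/* hypothetical word shared between CPUs */

	static void cmpxchg64_demo(void)
	{
		u64 old, new;

		do {
			old = seq;
			new = old + 1;
			/* full-barrier variant; cmpxchg64_relaxed() suits
			 * callers that provide their own ordering */
		} while (cmpxchg64(&seq, old, new) != old);
	}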
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 9672e978d50d..acdde76b39bb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -10,6 +10,7 @@
 #define CPUID_TLBTYPE	3
 #define CPUID_MPUIR	4
 #define CPUID_MPIDR	5
+#define CPUID_REVIDR	6
 
 #ifdef CONFIG_CPU_V7M
 #define CPUID_EXT_PFR0	0x40
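The new index plugs the Revision ID register into the existing read_cpuid() accessor, e.g.:

	unsigned int revidr = read_cpuid(CPUID_REVIDR);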
diff --git a/arch/arm/include/asm/hardirq.h b/arch/arm/include/asm/hardirq.h
index 2740c2a2df63..3d7351c844aa 100644
--- a/arch/arm/include/asm/hardirq.h
+++ b/arch/arm/include/asm/hardirq.h
@@ -5,7 +5,7 @@
 #include <linux/threads.h>
 #include <asm/irq.h>
 
-#define NR_IPI	6
+#define NR_IPI	7
 
 typedef struct {
 	unsigned int __softirq_pending;
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index fc82a88f5b69..1cf26010a6f3 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -81,10 +81,40 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
  *
  * This will return if mcpm_platform_register() has not been called
  * previously in which case the caller should take appropriate action.
+ *
+ * On success, the CPU is not guaranteed to be truly halted until
+ * mcpm_cpu_power_down_finish() subsequently returns zero for the
+ * specified cpu.  Until then, other CPUs should make sure they do not
+ * trash memory the target CPU might be executing/accessing.
  */
 void mcpm_cpu_power_down(void);
 
 /**
+ * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ *	make sure it is powered off
+ *
+ * @cpu: CPU number within given cluster
+ * @cluster: cluster number for the CPU
+ *
+ * Call this function to ensure that a pending powerdown has taken
+ * effect and the CPU is safely parked before performing non-mcpm
+ * operations that may affect the CPU (such as kexec trashing the
+ * kernel text).
+ *
+ * It is *not* necessary to call this function if you only need to
+ * serialise a pending powerdown with mcpm_cpu_power_up() or a wakeup
+ * event.
+ *
+ * Do not call this function unless the specified CPU has already
+ * called mcpm_cpu_power_down() or has committed to doing so.
+ *
+ * @return:
+ *	- zero if the CPU is in a safely parked state
+ *	- nonzero otherwise (e.g., timeout)
+ */
+int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+
+/**
  * mcpm_cpu_suspend - bring the calling CPU in a suspended state
  *
  * @expected_residency: duration in microseconds the CPU is expected
@@ -126,6 +156,7 @@ int mcpm_cpu_powered_up(void);
 struct mcpm_platform_ops {
 	int (*power_up)(unsigned int cpu, unsigned int cluster);
 	void (*power_down)(void);
+	int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
 	void (*suspend)(u64);
 	void (*powered_up)(void);
 };
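A sketch of how an SMP hot-unplug back-end might consume the new hook; sample_cpu_kill is hypothetical, and the negation reflects that smp_operations.cpu_kill reports success as 1 while mcpm_cpu_power_down_finish() reports it as 0:

	static int sample_cpu_kill(unsigned int l_cpu)
	{
		unsigned int mpidr = cpu_logical_map(l_cpu);
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

		/* Blocks until the dying CPU is safely parked (0) or the
		 * platform back-end gives up, e.g. on timeout (nonzero). */
		return !mcpm_cpu_power_down_finish(cpu, cluster);
	}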
diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
index f97ee02386ee..86a659a19526 100644
--- a/arch/arm/include/asm/pgtable-2level.h
+++ b/arch/arm/include/asm/pgtable-2level.h
@@ -181,6 +181,13 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 
 #define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,pte,ext)
 
+/*
+ * We don't have huge page support for short descriptors, for the moment
+ * define empty stubs for use by pin_page_for_write.
+ */
+#define pmd_hugewillfault(pmd)	(0)
+#define pmd_thp_or_huge(pmd)	(0)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_PGTABLE_2LEVEL_H */
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 5689c18c85f5..39c54cfa03e9 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -206,6 +206,9 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)		(!(pmd_val(pmd) & PMD_SECT_RDONLY))
 
+#define pmd_hugewillfault(pmd)	(!pmd_young(pmd) || !pmd_write(pmd))
+#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
 #define pmd_trans_splitting(pmd) (pmd_val(pmd) & PMD_SECT_SPLITTING)
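Together the two pgtable changes give pin_page_for_write a uniform test: on 2-level tables the stubs make the huge-page path unreachable, while on LPAE a huge or THP mapping that is old or read-only is sent back to the fault path. A hypothetical rendering of that decision:

	/* Hypothetical: -1 = not huge (walk PTEs), 0 = would fault, 1 = pin */
	static int sample_check_huge_pmd(pmd_t pmd)
	{
		if (!pmd_thp_or_huge(pmd))
			return -1;
		return pmd_hugewillfault(pmd) ? 0 : 1;
	}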
diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h
index c50f05609501..8d6a089dfb76 100644
--- a/arch/arm/include/asm/setup.h
+++ b/arch/arm/include/asm/setup.h
@@ -49,7 +49,7 @@ extern struct meminfo meminfo;
 #define bank_phys_end(bank)	((bank)->start + (bank)->size)
 #define bank_phys_size(bank)	(bank)->size
 
-extern int arm_add_memory(phys_addr_t start, phys_addr_t size);
+extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
 
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index 4f2c28060c9a..ed6c22919e47 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
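arch_spin_value_unlocked() takes a lock value rather than a pointer, which is what generic lockless code such as the cmpxchg-based lockref needs: it can test a snapshot it already holds without re-reading the live lock. For example (sample_snapshot_unlocked is hypothetical):

	static int sample_snapshot_unlocked(arch_spinlock_t *lock)
	{
		arch_spinlock_t snapshot = ACCESS_ONCE(*lock);

		/* No further access to *lock; only the copy is examined. */
		return arch_spin_value_unlocked(snapshot);
	}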
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index 38960264040c..def9e570199f 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -560,37 +560,6 @@ static inline void __flush_bp_all(void)
 	asm("mcr p15, 0, %0, c7, c1, 6" : : "r" (zero));
 }
 
-#include <asm/cputype.h>
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline int erratum_a15_798181(void)
-{
-	unsigned int midr = read_cpuid_id();
-
-	/* Cortex-A15 r0p0..r3p2 affected */
-	if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
-		return 0;
-	return 1;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-	/*
-	 * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
-	 */
-	asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
-	dsb(ish);
-}
-#else
-static inline int erratum_a15_798181(void)
-{
-	return 0;
-}
-
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
 /*
  * flush_pmd_entry
  *
@@ -697,4 +666,21 @@ extern void flush_bp_all(void);
 
 #endif
 
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_ARM_ERRATA_798181
+extern void erratum_a15_798181_init(void);
+#else
+static inline void erratum_a15_798181_init(void) {}
+#endif
+extern bool (*erratum_a15_798181_handler)(void);
+
+static inline bool erratum_a15_798181(void)
+{
+	if (unlikely(IS_ENABLED(CONFIG_ARM_ERRATA_798181) &&
+		erratum_a15_798181_handler))
+		return erratum_a15_798181_handler();
+	return false;
+}
+#endif
+
 #endif
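The erratum test thus moves from an inline MIDR decode on every call to a handler pointer installed once at boot, so unaffected systems pay only a predictable branch. A sketch of what the out-of-line init might look like, reusing the MIDR check from the deleted inline (the handler body is hypothetical):

	static bool sample_a15_798181_handler(void)
	{
		/* e.g. perform the dummy TLBIMVAIS + DSB broadcast workaround */
		return true;
	}

	void erratum_a15_798181_init(void)
	{
		unsigned int midr = read_cpuid_id();

		/* Cortex-A15 r0p0..r3p2 affected, as in the old inline test */
		if ((midr & 0xff0ffff0) == 0x410fc0f0 && midr <= 0x413fc0f2)
			erratum_a15_798181_handler = sample_a15_798181_handler;
	}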
diff --git a/arch/arm/include/debug/efm32.S b/arch/arm/include/debug/efm32.S
new file mode 100644
index 000000000000..2265a199280c
--- /dev/null
+++ b/arch/arm/include/debug/efm32.S
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013 Pengutronix
+ * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define UARTn_CMD		0x000c
+#define UARTn_CMD_TXEN		0x0004
+
+#define UARTn_STATUS		0x0010
+#define UARTn_STATUS_TXC	0x0020
+#define UARTn_STATUS_TXBL	0x0040
+
+#define UARTn_TXDATA		0x0034
+
+		.macro	addruart, rx, tmp
+		ldr	\rx, =(CONFIG_DEBUG_UART_PHYS)
+
+		/*
+		 * enable TX. The driver might disable it to save energy. We
+		 * don't care about disabling at the end as during debug power
+		 * consumption isn't that important.
+		 */
+		ldr	\tmp, =(UARTn_CMD_TXEN)
+		str	\tmp, [\rx, #UARTn_CMD]
+		.endm
+
+		.macro	senduart,rd,rx
+		strb	\rd, [\rx, #UARTn_TXDATA]
+		.endm
+
+		.macro	waituart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXBL
+		beq	1001b
+		.endm
+
+		.macro	busyuart,rd,rx
+1001:		ldr	\rd, [\rx, #UARTn_STATUS]
+		tst	\rd, #UARTn_STATUS_TXC
+		beq	1001b
+		.endm
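For reference, a C rendering of what the addruart/waituart/senduart sequence does per character; efm32_putc and the base-address plumbing are hypothetical, while the register offsets come from the file above:

	static void efm32_putc(unsigned long base, char c)
	{
		/* addruart: make sure the transmitter is enabled */
		*(volatile unsigned long *)(base + UARTn_CMD) = UARTn_CMD_TXEN;

		/* waituart: spin until the TX buffer has room (TXBL set) */
		while (!(*(volatile unsigned long *)(base + UARTn_STATUS) &
			 UARTn_STATUS_TXBL))
			;

		/* senduart: byte-wide store, matching the strb above */
		*(volatile unsigned char *)(base + UARTn_TXDATA) = c;
	}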
diff --git a/arch/arm/include/debug/msm.S b/arch/arm/include/debug/msm.S
index 9166e1bc470e..9d653d475903 100644
--- a/arch/arm/include/debug/msm.S
+++ b/arch/arm/include/debug/msm.S
@@ -46,6 +46,11 @@
 #define MSM_DEBUG_UART_PHYS	0x16440000
 #endif
 
+#ifdef CONFIG_DEBUG_MSM8974_UART
+#define MSM_DEBUG_UART_BASE	0xFA71E000
+#define MSM_DEBUG_UART_PHYS	0xF991E000
+#endif
+
 	.macro	addruart, rp, rv, tmp
 #ifdef MSM_DEBUG_UART_PHYS
 	ldr	\rp, =MSM_DEBUG_UART_PHYS